diff options
author | Lorry <lorry@roadtrain.codethink.co.uk> | 2012-08-15 17:04:08 +0100 |
---|---|---|
committer | Lorry <lorry@roadtrain.codethink.co.uk> | 2012-08-15 17:04:08 +0100 |
commit | 35a870a7a23c7d6076c658c1ec2122b642f264ed (patch) | |
tree | b9ce5b562cf1a297844a7983c6b3fe33bb536d23 /doc | |
download | check-35a870a7a23c7d6076c658c1ec2122b642f264ed.tar.gz |
Tarball conversion
Diffstat (limited to 'doc')
28 files changed, 5412 insertions, 0 deletions
diff --git a/doc/Makefile.am b/doc/Makefile.am new file mode 100644 index 0000000..ac1c254 --- /dev/null +++ b/doc/Makefile.am @@ -0,0 +1,122 @@ +## Process this file with automake to produce Makefile.in + +info_TEXINFOS = check.texi +check_TEXINFOS = fdl.texi + +## we need to include several diffs as we evolve the example in the +## tutorial. this means we'll generate them from the example source. + +$(srcdir)/check.texi: money.1-2.h.diff \ + money.1-3.c.diff \ + money.3-4.c.diff \ + money.4-5.c.diff \ + money.5-6.c.diff \ + check_money.1-2.c.diff \ + check_money.2-3.c.diff \ + check_money.3-6.c.diff \ + check_money.6-7.c.diff + +eg_root = $(top_srcdir)/doc/example +eg_src = $(eg_root)/src +eg_tests = $(eg_root)/tests + +## now a rule for each diff. the redundancy here can probably be +## parameterized, but I don't know how. if you know, please tell us! + +# diff returns 1 if there is a difference, but we don't want make to +# think that means there is an error +money.1-2.h.diff: $(eg_src)/money.1.h $(eg_src)/money.2.h + cd $(eg_root); \ + diff -u src/money.1.h src/money.2.h > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +money.1-3.c.diff: $(eg_src)/money.1.c $(eg_src)/money.3.c + cd $(eg_root); \ + diff -u src/money.1.c src/money.3.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +money.3-4.c.diff: $(eg_src)/money.3.c $(eg_src)/money.4.c + cd $(eg_root); \ + diff -u src/money.3.c src/money.4.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +money.4-5.c.diff: $(eg_src)/money.4.c $(eg_src)/money.5.c + cd $(eg_root); \ + diff -u src/money.4.c src/money.5.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +money.5-6.c.diff: $(eg_src)/money.5.c $(eg_src)/money.6.c + cd $(eg_root); \ + diff -u src/money.5.c src/money.6.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +check_money.1-2.c.diff: $(eg_tests)/check_money.1.c $(eg_tests)/check_money.2.c + cd $(eg_root); \ + diff -u tests/check_money.1.c tests/check_money.2.c > @abs_builddir@/$@ || test $$? 
-eq 1; \ + cd -; + +check_money.2-3.c.diff: $(eg_tests)/check_money.2.c $(eg_tests)/check_money.3.c + cd $(eg_root); \ + diff -u tests/check_money.2.c tests/check_money.3.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +check_money.3-6.c.diff: $(eg_tests)/check_money.3.c $(eg_tests)/check_money.6.c + cd $(eg_root); \ + diff -u tests/check_money.3.c tests/check_money.6.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +check_money.6-7.c.diff: $(eg_tests)/check_money.6.c $(eg_tests)/check_money.7.c + cd $(eg_root); \ + diff -u tests/check_money.6.c tests/check_money.7.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +# explicitly list every file in the example. + +example_docs = example/Makefile.am \ + example/README \ + example/configure.ac + +example_src_docs = example/src/Makefile.am \ + example/src/main.c \ + example/src/money.c \ + example/src/money.h \ + example/src/money.1.h \ + example/src/money.2.h \ + example/src/money.1.c \ + example/src/money.3.c \ + example/src/money.4.c \ + example/src/money.5.c \ + example/src/money.6.c + +example_tests_docs = example/tests/Makefile.am \ + example/tests/check_money.c \ + example/tests/check_money.1.c \ + example/tests/check_money.2.c \ + example/tests/check_money.3.c \ + example/tests/check_money.6.c \ + example/tests/check_money.7.c + +## what to clean + +CLEANFILES = *~ *.diff + +## what to distribute + +EXTRA_DIST = $(example_docs) \ + $(example_src_docs) \ + $(example_tests_docs) + +## what to install + +docdir = $(datadir)/doc/$(PACKAGE) + +# install money example + +exampledir = $(docdir)/example +example_DATA = $(example_docs) + +examplesrcdir = $(docdir)/example/src +examplesrc_DATA = $(example_src_docs) + +exampletestsdir = $(docdir)/example/tests +exampletests_DATA = $(example_tests_docs) diff --git a/doc/Makefile.in b/doc/Makefile.in new file mode 100644 index 0000000..0bf46f9 --- /dev/null +++ b/doc/Makefile.in @@ -0,0 +1,777 @@ +# Makefile.in generated by automake 1.10.2 from Makefile.am. 
+# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = doc +DIST_COMMON = $(check_TEXINFOS) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.in $(srcdir)/stamp-vti \ + $(srcdir)/version.texi +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/acx_pthread.m4 \ + $(top_srcdir)/m4/ax_c_check_flag.m4 \ + $(top_srcdir)/m4/ax_cflags_warn_all_ansi.m4 \ + $(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \ + $(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \ + $(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/config.h +CONFIG_CLEAN_FILES = +SOURCES = +DIST_SOURCES = +INFO_DEPS = $(srcdir)/check.info +TEXINFO_TEX = $(top_srcdir)/build-aux/texinfo.tex +am__TEXINFO_TEX_DIR = 
$(top_srcdir)/build-aux +DVIS = check.dvi +PDFS = check.pdf +PSS = check.ps +HTMLS = check.html +TEXINFOS = check.texi +TEXI2DVI = texi2dvi +TEXI2PDF = $(TEXI2DVI) --pdf --batch +MAKEINFOHTML = $(MAKEINFO) --html +AM_MAKEINFOHTMLFLAGS = $(AM_MAKEINFOFLAGS) +DVIPS = dvips +am__installdirs = "$(DESTDIR)$(infodir)" "$(DESTDIR)$(exampledir)" \ + "$(DESTDIR)$(examplesrcdir)" "$(DESTDIR)$(exampletestsdir)" +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = `echo $$p | sed -e 's|^.*/||'`; +exampleDATA_INSTALL = $(INSTALL_DATA) +examplesrcDATA_INSTALL = $(INSTALL_DATA) +exampletestsDATA_INSTALL = $(INSTALL_DATA) +DATA = $(example_DATA) $(examplesrc_DATA) $(exampletests_DATA) +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CHECK_MAJOR_VERSION = @CHECK_MAJOR_VERSION@ +CHECK_MICRO_VERSION = @CHECK_MICRO_VERSION@ +CHECK_MINOR_VERSION = @CHECK_MINOR_VERSION@ +CHECK_VERSION = @CHECK_VERSION@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ENABLE_SUBUNIT = @ENABLE_SUBUNIT@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GCOV = @GCOV@ +GCOV_CFLAGS = @GCOV_CFLAGS@ +GCOV_LIBS = @GCOV_LIBS@ +GENHTML = @GENHTML@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LCOV = @LCOV@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = 
@MAKEINFO@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +TEX = @TEX@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +acx_pthread_config = @acx_pthread_config@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = $(datadir)/doc/$(PACKAGE) +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +lt_ECHO = @lt_ECHO@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = 
@top_srcdir@ +info_TEXINFOS = check.texi +check_TEXINFOS = fdl.texi +eg_root = $(top_srcdir)/doc/example +eg_src = $(eg_root)/src +eg_tests = $(eg_root)/tests + +# explicitly list every file in the example. +example_docs = example/Makefile.am \ + example/README \ + example/configure.ac + +example_src_docs = example/src/Makefile.am \ + example/src/main.c \ + example/src/money.c \ + example/src/money.h \ + example/src/money.1.h \ + example/src/money.2.h \ + example/src/money.1.c \ + example/src/money.3.c \ + example/src/money.4.c \ + example/src/money.5.c \ + example/src/money.6.c + +example_tests_docs = example/tests/Makefile.am \ + example/tests/check_money.c \ + example/tests/check_money.1.c \ + example/tests/check_money.2.c \ + example/tests/check_money.3.c \ + example/tests/check_money.6.c \ + example/tests/check_money.7.c + +CLEANFILES = *~ *.diff +EXTRA_DIST = $(example_docs) \ + $(example_src_docs) \ + $(example_tests_docs) + + +# install money example +exampledir = $(docdir)/example +example_DATA = $(example_docs) +examplesrcdir = $(docdir)/example/src +examplesrc_DATA = $(example_src_docs) +exampletestsdir = $(docdir)/example/tests +exampletests_DATA = $(example_tests_docs) +all: all-am + +.SUFFIXES: +.SUFFIXES: .dvi .html .info .pdf .ps .texi +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnits doc/Makefile'; \ + cd $(top_srcdir) && \ + $(AUTOMAKE) --gnits doc/Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +.texi.info: + restore=: && backupdir="$(am__leading_dot)am$$$$" && \ + am__cwd=`pwd` && cd $(srcdir) && \ + rm -rf $$backupdir && mkdir $$backupdir && \ + if ($(MAKEINFO) --version) >/dev/null 2>&1; then \ + for f in $@ $@-[0-9] $@-[0-9][0-9] $(@:.info=).i[0-9] $(@:.info=).i[0-9][0-9]; do \ + if test -f $$f; then mv $$f $$backupdir; restore=mv; else :; fi; \ + done; \ + else :; fi && \ + cd "$$am__cwd"; \ + if $(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir) \ + -o $@ $<; \ + then \ + rc=0; \ + cd $(srcdir); \ + else \ + rc=$$?; \ + cd $(srcdir) && \ + $$restore $$backupdir/* `echo "./$@" | sed 's|[^/]*$$||'`; \ + fi; \ + rm -rf $$backupdir; exit $$rc + +.texi.dvi: + TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ + MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir)' \ + $(TEXI2DVI) $< + +.texi.pdf: + TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ + MAKEINFO='$(MAKEINFO) $(AM_MAKEINFOFLAGS) $(MAKEINFOFLAGS) -I $(srcdir)' \ + $(TEXI2PDF) $< + +.texi.html: + rm -rf $(@:.html=.htp) + if $(MAKEINFOHTML) $(AM_MAKEINFOHTMLFLAGS) $(MAKEINFOFLAGS) -I $(srcdir) \ + -o $(@:.html=.htp) $<; \ + then \ + rm -rf $@; \ + if test ! 
-d $(@:.html=.htp) && test -d $(@:.html=); then \ + mv $(@:.html=) $@; else mv $(@:.html=.htp) $@; fi; \ + else \ + if test ! -d $(@:.html=.htp) && test -d $(@:.html=); then \ + rm -rf $(@:.html=); else rm -Rf $(@:.html=.htp) $@; fi; \ + exit 1; \ + fi +$(srcdir)/check.info: check.texi $(srcdir)/version.texi $(check_TEXINFOS) +check.dvi: check.texi $(srcdir)/version.texi $(check_TEXINFOS) +check.pdf: check.texi $(srcdir)/version.texi $(check_TEXINFOS) +check.html: check.texi $(srcdir)/version.texi $(check_TEXINFOS) +$(srcdir)/version.texi: $(srcdir)/stamp-vti +$(srcdir)/stamp-vti: check.texi $(top_srcdir)/configure + @(dir=.; test -f ./check.texi || dir=$(srcdir); \ + set `$(SHELL) $(top_srcdir)/build-aux/mdate-sh $$dir/check.texi`; \ + echo "@set UPDATED $$1 $$2 $$3"; \ + echo "@set UPDATED-MONTH $$2 $$3"; \ + echo "@set EDITION $(VERSION)"; \ + echo "@set VERSION $(VERSION)") > vti.tmp + @cmp -s vti.tmp $(srcdir)/version.texi \ + || (echo "Updating $(srcdir)/version.texi"; \ + cp vti.tmp $(srcdir)/version.texi) + -@rm -f vti.tmp + @cp $(srcdir)/version.texi $@ + +mostlyclean-vti: + -rm -f vti.tmp + +maintainer-clean-vti: + -rm -f $(srcdir)/stamp-vti $(srcdir)/version.texi +.dvi.ps: + TEXINPUTS="$(am__TEXINFO_TEX_DIR)$(PATH_SEPARATOR)$$TEXINPUTS" \ + $(DVIPS) -o $@ $< + +uninstall-dvi-am: + @$(NORMAL_UNINSTALL) + @list='$(DVIS)'; for p in $$list; do \ + f=$(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(dvidir)/$$f'"; \ + rm -f "$(DESTDIR)$(dvidir)/$$f"; \ + done + +uninstall-html-am: + @$(NORMAL_UNINSTALL) + @list='$(HTMLS)'; for p in $$list; do \ + f=$(am__strip_dir) \ + echo " rm -rf '$(DESTDIR)$(htmldir)/$$f'"; \ + rm -rf "$(DESTDIR)$(htmldir)/$$f"; \ + done + +uninstall-info-am: + @$(PRE_UNINSTALL) + @if test -d '$(DESTDIR)$(infodir)' && \ + (install-info --version && \ + install-info --version 2>&1 | sed 1q | grep -i -v debian) >/dev/null 2>&1; then \ + list='$(INFO_DEPS)'; \ + for file in $$list; do \ + relfile=`echo "$$file" | sed 's|^.*/||'`; \ + echo " 
install-info --info-dir='$(DESTDIR)$(infodir)' --remove '$(DESTDIR)$(infodir)/$$relfile'"; \ + install-info --info-dir="$(DESTDIR)$(infodir)" --remove "$(DESTDIR)$(infodir)/$$relfile"; \ + done; \ + else :; fi + @$(NORMAL_UNINSTALL) + @list='$(INFO_DEPS)'; \ + for file in $$list; do \ + relfile=`echo "$$file" | sed 's|^.*/||'`; \ + relfile_i=`echo "$$relfile" | sed 's|\.info$$||;s|$$|.i|'`; \ + (if test -d "$(DESTDIR)$(infodir)" && cd "$(DESTDIR)$(infodir)"; then \ + echo " cd '$(DESTDIR)$(infodir)' && rm -f $$relfile $$relfile-[0-9] $$relfile-[0-9][0-9] $$relfile_i[0-9] $$relfile_i[0-9][0-9]"; \ + rm -f $$relfile $$relfile-[0-9] $$relfile-[0-9][0-9] $$relfile_i[0-9] $$relfile_i[0-9][0-9]; \ + else :; fi); \ + done + +uninstall-pdf-am: + @$(NORMAL_UNINSTALL) + @list='$(PDFS)'; for p in $$list; do \ + f=$(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(pdfdir)/$$f'"; \ + rm -f "$(DESTDIR)$(pdfdir)/$$f"; \ + done + +uninstall-ps-am: + @$(NORMAL_UNINSTALL) + @list='$(PSS)'; for p in $$list; do \ + f=$(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(psdir)/$$f'"; \ + rm -f "$(DESTDIR)$(psdir)/$$f"; \ + done + +dist-info: $(INFO_DEPS) + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + list='$(INFO_DEPS)'; \ + for base in $$list; do \ + case $$base in \ + $(srcdir)/*) base=`echo "$$base" | sed "s|^$$srcdirstrip/||"`;; \ + esac; \ + if test -f $$base; then d=.; else d=$(srcdir); fi; \ + base_i=`echo "$$base" | sed 's|\.info$$||;s|$$|.i|'`; \ + for file in $$d/$$base $$d/$$base-[0-9] $$d/$$base-[0-9][0-9] $$d/$$base_i[0-9] $$d/$$base_i[0-9][0-9]; do \ + if test -f $$file; then \ + relfile=`expr "$$file" : "$$d/\(.*\)"`; \ + test -f $(distdir)/$$relfile || \ + cp -p $$file $(distdir)/$$relfile; \ + else :; fi; \ + done; \ + done + +mostlyclean-aminfo: + -rm -rf check.aux check.cp check.cps check.fn check.ky check.kys check.log \ + check.pg check.pgs check.tmp check.toc check.tp check.vr \ + check.dvi check.pdf check.ps check.html + +maintainer-clean-aminfo: + 
@list='$(INFO_DEPS)'; for i in $$list; do \ + i_i=`echo "$$i" | sed 's|\.info$$||;s|$$|.i|'`; \ + echo " rm -f $$i $$i-[0-9] $$i-[0-9][0-9] $$i_i[0-9] $$i_i[0-9][0-9]"; \ + rm -f $$i $$i-[0-9] $$i-[0-9][0-9] $$i_i[0-9] $$i_i[0-9][0-9]; \ + done +install-exampleDATA: $(example_DATA) + @$(NORMAL_INSTALL) + test -z "$(exampledir)" || $(MKDIR_P) "$(DESTDIR)$(exampledir)" + @list='$(example_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + f=$(am__strip_dir) \ + echo " $(exampleDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(exampledir)/$$f'"; \ + $(exampleDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(exampledir)/$$f"; \ + done + +uninstall-exampleDATA: + @$(NORMAL_UNINSTALL) + @list='$(example_DATA)'; for p in $$list; do \ + f=$(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(exampledir)/$$f'"; \ + rm -f "$(DESTDIR)$(exampledir)/$$f"; \ + done +install-examplesrcDATA: $(examplesrc_DATA) + @$(NORMAL_INSTALL) + test -z "$(examplesrcdir)" || $(MKDIR_P) "$(DESTDIR)$(examplesrcdir)" + @list='$(examplesrc_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + f=$(am__strip_dir) \ + echo " $(examplesrcDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(examplesrcdir)/$$f'"; \ + $(examplesrcDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(examplesrcdir)/$$f"; \ + done + +uninstall-examplesrcDATA: + @$(NORMAL_UNINSTALL) + @list='$(examplesrc_DATA)'; for p in $$list; do \ + f=$(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(examplesrcdir)/$$f'"; \ + rm -f "$(DESTDIR)$(examplesrcdir)/$$f"; \ + done +install-exampletestsDATA: $(exampletests_DATA) + @$(NORMAL_INSTALL) + test -z "$(exampletestsdir)" || $(MKDIR_P) "$(DESTDIR)$(exampletestsdir)" + @list='$(exampletests_DATA)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + f=$(am__strip_dir) \ + echo " $(exampletestsDATA_INSTALL) '$$d$$p' '$(DESTDIR)$(exampletestsdir)/$$f'"; \ + $(exampletestsDATA_INSTALL) "$$d$$p" "$(DESTDIR)$(exampletestsdir)/$$f"; \ + done + 
+uninstall-exampletestsDATA: + @$(NORMAL_UNINSTALL) + @list='$(exampletests_DATA)'; for p in $$list; do \ + f=$(am__strip_dir) \ + echo " rm -f '$(DESTDIR)$(exampletestsdir)/$$f'"; \ + rm -f "$(DESTDIR)$(exampletestsdir)/$$f"; \ + done +tags: TAGS +TAGS: + +ctags: CTAGS +CTAGS: + + +distdir: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ + fi; \ + cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ + else \ + test -f $(distdir)/$$file \ + || cp -p $$d/$$file $(distdir)/$$file \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-info +check-am: all-am +check: check-am +all-am: Makefile $(INFO_DEPS) $(DATA) +installdirs: + for dir in "$(DESTDIR)$(infodir)" "$(DESTDIR)$(exampledir)" "$(DESTDIR)$(examplesrcdir)" "$(DESTDIR)$(exampletestsdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + `test -z '$(STRIP)' || 
\ + echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: $(DVIS) + +html: html-am + +html-am: $(HTMLS) + +info: info-am + +info-am: $(INFO_DEPS) + +install-data-am: install-exampleDATA install-examplesrcDATA \ + install-exampletestsDATA install-info-am + +install-dvi: install-dvi-am + +install-dvi-am: $(DVIS) + @$(NORMAL_INSTALL) + test -z "$(dvidir)" || $(MKDIR_P) "$(DESTDIR)$(dvidir)" + @list='$(DVIS)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + f=$(am__strip_dir) \ + echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(dvidir)/$$f'"; \ + $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(dvidir)/$$f"; \ + done +install-exec-am: + +install-html: install-html-am + +install-html-am: $(HTMLS) + @$(NORMAL_INSTALL) + test -z "$(htmldir)" || $(MKDIR_P) "$(DESTDIR)$(htmldir)" + @list='$(HTMLS)'; for p in $$list; do \ + if test -f "$$p" || test -d "$$p"; then d=; else d="$(srcdir)/"; fi; \ + f=$(am__strip_dir) \ + if test -d "$$d$$p"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(htmldir)/$$f'"; \ + $(MKDIR_P) "$(DESTDIR)$(htmldir)/$$f" || exit 1; \ + echo " $(INSTALL_DATA) '$$d$$p'/* '$(DESTDIR)$(htmldir)/$$f'"; \ + $(INSTALL_DATA) "$$d$$p"/* "$(DESTDIR)$(htmldir)/$$f"; \ + else \ + echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(htmldir)/$$f'"; \ + $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(htmldir)/$$f"; \ + fi; \ + done +install-info: install-info-am + +install-info-am: $(INFO_DEPS) + @$(NORMAL_INSTALL) + test -z "$(infodir)" || $(MKDIR_P) 
"$(DESTDIR)$(infodir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ + list='$(INFO_DEPS)'; \ + for file in $$list; do \ + case $$file in \ + $(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ + esac; \ + if test -f $$file; then d=.; else d=$(srcdir); fi; \ + file_i=`echo "$$file" | sed 's|\.info$$||;s|$$|.i|'`; \ + for ifile in $$d/$$file $$d/$$file-[0-9] $$d/$$file-[0-9][0-9] \ + $$d/$$file_i[0-9] $$d/$$file_i[0-9][0-9] ; do \ + if test -f $$ifile; then \ + relfile=`echo "$$ifile" | sed 's|^.*/||'`; \ + echo " $(INSTALL_DATA) '$$ifile' '$(DESTDIR)$(infodir)/$$relfile'"; \ + $(INSTALL_DATA) "$$ifile" "$(DESTDIR)$(infodir)/$$relfile"; \ + else : ; fi; \ + done; \ + done + @$(POST_INSTALL) + @if (install-info --version && \ + install-info --version 2>&1 | sed 1q | grep -i -v debian) >/dev/null 2>&1; then \ + list='$(INFO_DEPS)'; \ + for file in $$list; do \ + relfile=`echo "$$file" | sed 's|^.*/||'`; \ + echo " install-info --info-dir='$(DESTDIR)$(infodir)' '$(DESTDIR)$(infodir)/$$relfile'";\ + install-info --info-dir="$(DESTDIR)$(infodir)" "$(DESTDIR)$(infodir)/$$relfile" || :;\ + done; \ + else : ; fi +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: $(PDFS) + @$(NORMAL_INSTALL) + test -z "$(pdfdir)" || $(MKDIR_P) "$(DESTDIR)$(pdfdir)" + @list='$(PDFS)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + f=$(am__strip_dir) \ + echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(pdfdir)/$$f'"; \ + $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(pdfdir)/$$f"; \ + done +install-ps: install-ps-am + +install-ps-am: $(PSS) + @$(NORMAL_INSTALL) + test -z "$(psdir)" || $(MKDIR_P) "$(DESTDIR)$(psdir)" + @list='$(PSS)'; for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + f=$(am__strip_dir) \ + echo " $(INSTALL_DATA) '$$d$$p' '$(DESTDIR)$(psdir)/$$f'"; \ + $(INSTALL_DATA) "$$d$$p" "$(DESTDIR)$(psdir)/$$f"; \ + done +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile 
+maintainer-clean-am: distclean-am maintainer-clean-aminfo \ + maintainer-clean-generic maintainer-clean-vti + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-aminfo mostlyclean-generic \ + mostlyclean-libtool mostlyclean-vti + +pdf: pdf-am + +pdf-am: $(PDFS) + +ps: ps-am + +ps-am: $(PSS) + +uninstall-am: uninstall-dvi-am uninstall-exampleDATA \ + uninstall-examplesrcDATA uninstall-exampletestsDATA \ + uninstall-html-am uninstall-info-am uninstall-pdf-am \ + uninstall-ps-am + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic clean-libtool \ + dist-info distclean distclean-generic distclean-libtool \ + distdir dvi dvi-am html html-am info info-am install \ + install-am install-data install-data-am install-dvi \ + install-dvi-am install-exampleDATA install-examplesrcDATA \ + install-exampletestsDATA install-exec install-exec-am \ + install-html install-html-am install-info install-info-am \ + install-man install-pdf install-pdf-am install-ps \ + install-ps-am install-strip installcheck installcheck-am \ + installdirs maintainer-clean maintainer-clean-aminfo \ + maintainer-clean-generic maintainer-clean-vti mostlyclean \ + mostlyclean-aminfo mostlyclean-generic mostlyclean-libtool \ + mostlyclean-vti pdf pdf-am ps ps-am uninstall uninstall-am \ + uninstall-dvi-am uninstall-exampleDATA \ + uninstall-examplesrcDATA uninstall-exampletestsDATA \ + uninstall-html-am uninstall-info-am uninstall-pdf-am \ + uninstall-ps-am + + +$(srcdir)/check.texi: money.1-2.h.diff \ + money.1-3.c.diff \ + money.3-4.c.diff \ + money.4-5.c.diff \ + money.5-6.c.diff \ + check_money.1-2.c.diff \ + check_money.2-3.c.diff \ + check_money.3-6.c.diff \ + check_money.6-7.c.diff + +# diff returns 1 if there is a difference, but we don't want make to +# think that means there is an error +money.1-2.h.diff: $(eg_src)/money.1.h $(eg_src)/money.2.h + cd $(eg_root); \ + diff -u src/money.1.h src/money.2.h > @abs_builddir@/$@ || test $$? 
-eq 1; \ + cd -; + +money.1-3.c.diff: $(eg_src)/money.1.c $(eg_src)/money.3.c + cd $(eg_root); \ + diff -u src/money.1.c src/money.3.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +money.3-4.c.diff: $(eg_src)/money.3.c $(eg_src)/money.4.c + cd $(eg_root); \ + diff -u src/money.3.c src/money.4.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +money.4-5.c.diff: $(eg_src)/money.4.c $(eg_src)/money.5.c + cd $(eg_root); \ + diff -u src/money.4.c src/money.5.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +money.5-6.c.diff: $(eg_src)/money.5.c $(eg_src)/money.6.c + cd $(eg_root); \ + diff -u src/money.5.c src/money.6.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +check_money.1-2.c.diff: $(eg_tests)/check_money.1.c $(eg_tests)/check_money.2.c + cd $(eg_root); \ + diff -u tests/check_money.1.c tests/check_money.2.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +check_money.2-3.c.diff: $(eg_tests)/check_money.2.c $(eg_tests)/check_money.3.c + cd $(eg_root); \ + diff -u tests/check_money.2.c tests/check_money.3.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +check_money.3-6.c.diff: $(eg_tests)/check_money.3.c $(eg_tests)/check_money.6.c + cd $(eg_root); \ + diff -u tests/check_money.3.c tests/check_money.6.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; + +check_money.6-7.c.diff: $(eg_tests)/check_money.6.c $(eg_tests)/check_money.7.c + cd $(eg_root); \ + diff -u tests/check_money.6.c tests/check_money.7.c > @abs_builddir@/$@ || test $$? -eq 1; \ + cd -; +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/doc/check.info b/doc/check.info new file mode 100644 index 0000000..b8db723 --- /dev/null +++ b/doc/check.info @@ -0,0 +1,2037 @@ +This is check.info, produced by makeinfo version 4.11 from check.texi. + +This manual is for Check (version 0.9.8, 23 September 2009), a unit +testing framework for C. 
+ + Copyright (C) 2001-2009 Arien Malec, Chris Pickett, Fredrik +Hugosson, and Robert Lemmen. + + Permission is granted to copy, distribute and/or modify this + document under the terms of the GNU Free Documentation License, + Version 1.2 or any later version published by the Free Software + Foundation; with no Invariant Sections, no Front-Cover texts, and + no Back-Cover Texts. A copy of the license is included in the + section entitled "GNU Free Documentation License." + +INFO-DIR-SECTION Software development +START-INFO-DIR-ENTRY +* Check: (check)Introduction. +END-INFO-DIR-ENTRY + + +File: check.info, Node: Top, Next: Introduction, Prev: (dir), Up: (dir) + +Check +***** + +This manual is for Check (version 0.9.8, 23 September 2009), a unit +testing framework for C. + + Copyright (C) 2001-2009 Arien Malec, Chris Pickett, Fredrik +Hugosson, and Robert Lemmen. + + Permission is granted to copy, distribute and/or modify this + document under the terms of the GNU Free Documentation License, + Version 1.2 or any later version published by the Free Software + Foundation; with no Invariant Sections, no Front-Cover texts, and + no Back-Cover Texts. A copy of the license is included in the + section entitled "GNU Free Documentation License." + + Please send corrections to this manual to <check-devel AT +lists.sourceforge.net>. We'd prefer it if you can send a unified diff +(`diff -u') against the `doc/check.texi' file that ships with Check, +but something is still better than nothing if you can't manage that. 
+ +* Menu: + +* Introduction:: +* Unit Testing in C:: +* Tutorial:: +* Advanced Features:: +* Conclusion and References:: +* AM_PATH_CHECK:: +* Copying This Manual:: +* Index:: + + --- The Detailed Node Listing --- + +Unit Testing in C + +* Other Frameworks for C:: + +Tutorial: Basic Unit Testing + +* How to Write a Test:: +* Setting Up the Money Build:: +* Test a Little:: +* Creating a Suite:: +* SRunner Output:: + +Advanced Features + +* Running Multiple Cases:: +* No Fork Mode:: +* Test Fixtures:: +* Multiple Suites in one SRunner:: +* Testing Signal Handling and Exit Values:: +* Looping Tests:: +* Test Timeouts:: +* Determining Test Coverage:: +* Test Logging:: +* Subunit Support:: + +Test Fixtures + +* Test Fixture Examples:: +* Checked vs Unchecked Fixtures:: + +Test Logging + +* XML Logging:: + +Copying This Manual + +* GNU Free Documentation License:: License for copying this manual. + + +File: check.info, Node: Introduction, Next: Unit Testing in C, Prev: Top, Up: Top + +1 Introduction +************** + +Check is a unit testing framework for C. It was inspired by similar +frameworks that currently exist for most programming languages; the +most famous example being JUnit (http://www.junit.org) for Java. There +is a list of unit test frameworks for multiple languages at +`http://www.xprogramming.com/software.htm'. Unit testing has a long +history as part of formal quality assurance methodologies, but has +recently been associated with the lightweight methodology called +Extreme Programming. In that methodology, the characteristic practice +involves interspersing unit test writing with coding ("test a little, +code a little"). While the incremental unit test/code approach is +indispensable to Extreme Programming, it is also applicable, and +perhaps indispensable, outside of that methodology. + + The incremental test/code approach provides three main benefits to +the developer: + + 1. 
Because the unit tests use the interface to the unit being tested, + they allow the developer to think about how the interface should be + designed for usage early in the coding process. + + 2. They help the developer think early about aberrant cases, and code + accordingly. + + 3. By providing a documented level of correctness, they allow the + developer to refactor (see `http://www.refactoring.com') + aggressively. + + That third reason is the one that turns people into unit testing +addicts. There is nothing so satisfying as doing a wholesale +replacement of an implementation, and having the unit tests reassure +you at each step of that change that all is well. It is like the +difference between exploring the wilderness with and without a good map +and compass: without the proper gear, you are more likely to proceed +cautiously and stick to the marked trails; with it, you can take the +most direct path to where you want to go. + + Look at the Check homepage for the latest information on Check: +`http://check.sourceforge.net'. + + The Check project page is at: +`http://sourceforge.net/projects/check/'. + + +File: check.info, Node: Unit Testing in C, Next: Tutorial, Prev: Introduction, Up: Top + +2 Unit Testing in C +******************* + +C unit testing + + The approach to unit testing frameworks used for Check originated +with Smalltalk, which is a late binding object-oriented language +supporting reflection. Writing a framework for C requires solving some +special problems that frameworks for Smalltalk, Java or Python don't +have to face. In all of those language, the worst that a unit test can +do is fail miserably, throwing an exception of some sort. In C, a unit +test is just as likely to trash its address space as it is to fail to +meet its test requirements, and if the test framework sits in the same +address space, goodbye test framework. 
+ + To solve this problem, Check uses the `fork()' system call to create +a new address space in which to run each unit test, and then uses +message queues to send information on the testing process back to the +test framework. That way, your unit test can do all sorts of nasty +things with pointers, and throw a segmentation fault, and the test +framework will happily note a unit test error, and chug along. + + The Check framework is also designed to play happily with common +development environments for C programming. The author designed Check +around Autoconf/Automake (thus the name Check: `make check' is the +idiom used for testing with Autoconf/Automake), and the test failure +messages thrown up by Check use the common idiom of +`filename:linenumber:message' used by `gcc' and family to report +problems in source code. With (X)Emacs, the output of Check allows one +to quickly navigate to the location of the unit test that failed; +presumably that also works in VI and IDEs. + +* Menu: + +* Other Frameworks for C:: + + +File: check.info, Node: Other Frameworks for C, Prev: Unit Testing in C, Up: Unit Testing in C + +2.1 Other Frameworks for C +========================== + +The authors know of the following additional unit testing frameworks +for C: + +AceUnit + AceUnit (Advanced C and Embedded Unit) bills itself as a + comfortable C code unit test framework. It tries to mimick JUnit + 4.x and includes reflection-like capabilities. AceUnit can be + used in resource constraint environments, e.g. embedded software + development, and importantly it runs fine in environments where + you cannot include a single standard header file and cannot invoke + a single standard C function from the ANSI / ISO C libraries. It + also has a Windows port. It does not use forks to trap signals, + although the authors have expressed interest in adding such a + feature. See the AceUnit homepage + (http://aceunit.sourceforge.net/). 
+ +GNU Autounit + Much along the same lines as Check, including forking to run unit + tests in a separate address space (in fact, the original author of + Check borrowed the idea from GNU Autounit). GNU Autounit uses + GLib extensively, which means that linking and such need special + options, but this may not be a big problem to you, especially if + you are already using GTK or GLib. See the GNU Autounit homepage + (http://www.recursism.com/s2004/zp/products/gnu+autounit). + +cUnit + Also uses GLib, but does not fork to protect the address space of + unit tests. See the archived cUnit homepage + (http://web.archive.org/web/*/http://people.codefactory.se/~spotty/cunit/). + +CUnit + Standard C, with plans for a Win32 GUI implementation. Does not + currently fork or otherwise protect the address space of unit + tests. In early development. See the CUnit homepage + (http://cunit.sourceforge.net). + +CppUnit + The premier unit testing framework for C++; you can also use it to + test C code. It is stable, actively developed, and has a GUI + interface. The primary reasons not to use CppUnit for C are first + that it is quite big, and second you have to write your tests in + C++, which means you need a C++ compiler. If these don't sound + like concerns, it is definitely worth considering, along with + other C++ unit testing frameworks. See the CppUnit homepage + (http://cppunit.sourceforge.net/cppunit-wiki). + +embUnit + embUnit (Embedded Unit) is another unit test framework for embedded + systems. This one appears to be superseded by AceUnit. Embedded + Unit homepage (https://sourceforge.net/projects/embunit/). + +MinUnit + A minimal set of macros and that's it! The point is to show how + easy it is to unit test your code. See the MinUnit homepage + (http://www.jera.com/techinfo/jtns/jtn002.html). + +CUnit for Mr. Ando + A CUnit implementation that is fairly new, and apparently still in + early development. See the CUnit for Mr. 
Ando homepage + (http://park.ruru.ne.jp/ando/work/CUnitForAndo/html/). + + This list was last updated in March 2008. If you know of other C +unit test frameworks, please send an email plus description to +<check-devel AT lists.sourceforge.net> and we will add the entry to +this list. + + It is the authors' considered opinion that forking or otherwise +trapping and reporting signals is indispensable for unit testing (but +it probably wouldn't be hard to add that to frameworks without that +feature). Try 'em all out: adapt this tutorial to use all of the +frameworks above, and use whichever you like. Contribute, spread the +word, and make one a standard. Languages such as Java and Python are +fortunate to have standard unit testing frameworks; it would be +desirable that C have one as well. + + +File: check.info, Node: Tutorial, Next: Advanced Features, Prev: Unit Testing in C, Up: Top + +3 Tutorial: Basic Unit Testing +****************************** + +This tutorial will use the JUnit Test Infected +(http://junit.sourceforge.net/doc/testinfected/testing.htm) article as +a starting point. We will be creating a library to represent money, +`libmoney', that allows conversions between different currency types. +The development style will be "test a little, code a little", with unit +test writing preceding coding. This constantly gives us insights into +module usage, and also makes sure we are constantly thinking about how +to test our code. + +* Menu: + +* How to Write a Test:: +* Setting Up the Money Build:: +* Test a Little:: +* Creating a Suite:: +* SRunner Output:: + + +File: check.info, Node: How to Write a Test, Next: Setting Up the Money Build, Prev: Tutorial, Up: Tutorial + +3.1 How to Write a Test +======================= + +Test writing using Check is very simple. 
The file in which the checks +are defined must include `check.h' as so: + + #include <check.h> + + The basic unit test looks as follows: + + START_TEST (test_name) + { + /* unit test code */ + } + END_TEST + + The `START_TEST'/`END_TEST' pair are macros that setup basic +structures to permit testing. It is a mistake to leave off the +`END_TEST' marker; doing so produces all sorts of strange errors when +the check is compiled. + + +File: check.info, Node: Setting Up the Money Build, Next: Test a Little, Prev: How to Write a Test, Up: Tutorial + +3.2 Setting Up the Money Build +============================== + +Since we are creating a library to handle money, we will first create +an interface in `money.h', an implementation in `money.c', and a place +to store our unit tests, `check_money.c'. We want to integrate these +core files into our build system, and will need some additional +structure. To manage everything we'll use Autoconf, Automake, and +friends (collectively known as Autotools) for this example. One could +do something similar with ordinary Makefiles, but in the authors' +opinion, it is generally easier to use Autotools than bare Makefiles, +and they provide built-in support for running tests. + + Note that this is not the place to explain how Autotools works. If +you need help understanding what's going on beyond the explanations +here, the best place to start is probably Alexandre Duret-Lutz's +excellent Autotools tutorial +(http://www.lrde.epita.fr/~adl/autotools.html). + + The examples in this section are part of the Check distribution; you +don't need to spend time cutting and pasting or (worse) retyping them. +Locate the Check documentation on your system and look in the `example' +directory. The standard directory for GNU/Linux distributions should +be `/usr/share/doc/check/example'. This directory contains the final +version reached the end of the tutorial. 
If you want to follow along, +create backups of `money.h', `money.c', and `check_money.c', and then +delete the originals. + + We set up a directory structure as follows: + + . + |-- Makefile.am + |-- README + |-- configure.ac + |-- src + | |-- Makefile.am + | |-- main.c + | |-- money.c + | `-- money.h + `-- tests + |-- Makefile.am + `-- check_money.c + + Note that this is the output of `tree', a great directory +visualization tool. The top-level `Makefile.am' is simple; it merely +tells Automake how to process subdirectories: + + SUBDIRS = src . tests + + Note that `tests' comes last, because the code should be testing an +already compiled library. `configure.ac' is standard Autoconf +boilerplate, as specified by the Autotools tutorial and as suggested by +`autoscan'. The `AM_PATH_CHECK()' is the only line particular to Check +*note AM_PATH_CHECK::. + + `src/Makefile.am' builds `libmoney' as a Libtool archive, and links +it to an application simply called `main'. The application's behaviour +is not important to this tutorial; what's important is that none of the +functions we want to unit test appear in `main.c'; this probably means +that the only function in `main.c' should be `main()' itself. In order +to test the whole application, unit testing is not appropriate: you +should use a system testing tool like Autotest. If you really want to +test `main()' using Check, rename it to something like +`_myproject_main()' and write a wrapper around it. + + The primary build instructions for our unit tests are in +`tests/Makefile.am': + + ## Process this file with automake to produce Makefile.in + + TESTS = check_money + check_PROGRAMS = check_money + check_money_SOURCES = check_money.c $(top_builddir)/src/money.h + check_money_CFLAGS = @CHECK_CFLAGS@ + check_money_LDADD = $(top_builddir)/src/libmoney.la @CHECK_LIBS@ + + `TESTS' tells Automake which test programs to run for `make check'. 
+Similarly, the `check_' prefix in `check_PROGRAMS' actually comes from +Automake; it says to build these programs only when `make check' is +run. (Recall that Automake's `check' target is the origin of Check's +name.) The `check_money' test is a program that we will build from +`tests/check_money.c', linking it against both `src/libmoney.la' and +the installed `libcheck.la' on our system. The appropriate compiler +and linker flags for using Check are found in `@CHECK_CFLAGS@' and +`@CHECK_LIBS@', values defined by the `AM_PATH_CHECK' macro. + + Now that all this infrastructure is out of the way, we can get on +with development. `src/money.h' should only contain standard C header +boilerplate: + + #ifndef MONEY_H + #define MONEY_H + + #endif /* MONEY_H */ + + `src/money.c' should be empty, and `tests/check_money.c' should only +contain an empty `main()' function: + + int + main (void) + { + return 0; + } + + Create the GNU Build System for the project and then build `main' +and `libmoney.la' as follows: + + $ autoreconf --install + $ ./configure + $ make + + (`autoreconf' determines which commands are needed in order for +`configure' to be created or brought up to date. Previously one would +use a script called `autogen.sh' or `bootstrap', but that practice is +unnecessary now.) + + Now build and run the `check_money' test with `make check'. If all +goes well, `make' should report that our tests passed. No surprise, +because there aren't any tests to fail. If you have problems, make +sure to see *note AM_PATH_CHECK::. + + This was tested on the i386 "testing" distribution of Debian +GNU/Linux (etch) in March 2006, using Autoconf 2.59, Automake 1.9.6, +and Libtool 1.5.22. Please report any problems to <check-devel AT +lists.sourceforge.net>. 
+ + +File: check.info, Node: Test a Little, Next: Creating a Suite, Prev: Setting Up the Money Build, Up: Tutorial + +3.3 Test a Little, Code a Little +================================ + +The Test Infected +(http://junit.sourceforge.net/doc/testinfected/testing.htm) article +starts out with a `Money' class, and so will we. Of course, we can't +do classes with C, but we don't really need to. The Test Infected +approach to writing code says that we should write the unit test +_before_ we write the code, and in this case, we will be even more +dogmatic and doctrinaire than the authors of Test Infected (who clearly +don't really get this stuff, only being some of the originators of the +Patterns approach to software development and OO design). + + Here are the changes to `check_money.c' for our first unit test: + + --- tests/check_money.1.c 2009-09-23 20:44:00.000000000 +1000 + +++ tests/check_money.2.c 2009-09-23 20:44:00.000000000 +1000 + @@ -1,3 +1,18 @@ + +#include <check.h> + +#include "../src/money.h" + + + +START_TEST (test_money_create) + +{ + + Money *m; + + m = money_create (5, "USD"); + + fail_unless (money_amount (m) == 5, + + "Amount not set correctly on creation"); + + fail_unless (strcmp (money_currency (m), "USD") == 0, + + "Currency not set correctly on creation"); + + money_free (m); + +} + +END_TEST + + + int + main (void) + { + + A unit test should just chug along and complete. If it exits early, +or is signaled, it will fail with a generic error message. (Note: it +is conceivable that you expect an early exit, or a signal and there is +functionality in Check to specifically assert that we should expect a +signal or an early exit.) If we want to get some information about +what failed, we need to use the `fail_unless()' function. The function +(actually a macro) takes a first Boolean argument, and an error message +to send if the condition is not true. 
+ + If the Boolean argument is too complicated to elegantly express +within `fail_unless()', there is an alternate function `fail()' that +unconditionally fails. The second test inside `test_money_create' +above could be rewritten as follows: + + if (strcmp (money_currency (m), "USD") != 0) + { + fail ("Currency not set correctly on creation"); + } + + There is also a `fail_if()' function, which is the inverse of +`fail_unless()'. Using it, the above test then looks like this: + + fail_if (strcmp (money_currency (m), "USD") != 0, + "Currency not set correctly on creation"); + + For your convenience all fail functions also accepts NULL as the msg +argument and substitutes a suitable message for you. So you could also +write a test as follows: + + fail_unless (money_amount (m) == 5, NULL); + + This is equivalent to: + + fail_unless (money_amount (m) == 5, + "Assertion 'money_amount (m) == 5' failed"); + + All fail functions also support `varargs' and accept `printf'-style +format strings and arguments. This is especially useful while +debugging. With `printf'-style formatting the message could look like +this: + + fail_unless(money_amount (m) == 5, + "Amount was %d, instead of 5", money_amount (m)); + + When we try to compile and run the test suite now using `make +check', we get a whole host of compilation errors. It may seem a bit +strange to deliberately write code that won't compile, but notice what +we are doing: in creating the unit test, we are also defining +requirements for the money interface. Compilation errors are, in a +way, unit test failures of their own, telling us that the +implementation does not match the specification. If all we do is edit +the sources so that the unit test compiles, we are actually making +progress, guided by the unit tests, so that's what we will now do. 
+ + We will patch our header `money.h' as follows: + + --- src/money.1.h 2009-09-23 20:44:00.000000000 +1000 + +++ src/money.2.h 2009-09-23 20:44:00.000000000 +1000 + @@ -1,4 +1,11 @@ + #ifndef MONEY_H + #define MONEY_H + + +typedef struct Money Money; + + + +Money *money_create (int amount, char *currency); + +int money_amount (Money * m); + +char *money_currency (Money * m); + +void money_free (Money * m); + + + #endif /* MONEY_H */ + + Our code compiles now, and again passes all of the tests. However, +once we try to _use_ the functions in `libmoney' in the `main()' of +`check_money', we'll run into more problems, as they haven't actually +been implemented yet. + + +File: check.info, Node: Creating a Suite, Next: SRunner Output, Prev: Test a Little, Up: Tutorial + +3.4 Creating a Suite +==================== + +To run unit tests with Check, we must create some test cases, aggregate +them into a suite, and run them with a suite runner. That's a bit of +overhead, but it is mostly one-off. Here's a diff for the new version +of `check_money.c'. Note that we include stdlib.h to get the +definitions of `EXIT_SUCCESS' and `EXIT_FAILURE'. + + --- tests/check_money.2.c 2009-09-23 20:44:00.000000000 +1000 + +++ tests/check_money.3.c 2009-09-23 20:44:00.000000000 +1000 + @@ -1,3 +1,4 @@ + +#include <stdlib.h> + #include <check.h> + #include "../src/money.h" + + @@ -13,8 +14,27 @@ + } + END_TEST + + +Suite * + +money_suite (void) + +{ + + Suite *s = suite_create ("Money"); + + + + /* Core test case */ + + TCase *tc_core = tcase_create ("Core"); + + tcase_add_test (tc_core, test_money_create); + + suite_add_tcase (s, tc_core); + + + + return s; + +} + + + int + main (void) + { + - return 0; + + int number_failed; + + Suite *s = money_suite (); + + SRunner *sr = srunner_create (s); + + srunner_run_all (sr, CK_NORMAL); + + number_failed = srunner_ntests_failed (sr); + + srunner_free (sr); + + return (number_failed == 0) ? 
EXIT_SUCCESS : EXIT_FAILURE; + } + + Most of the `money_suite()' code should be self-explanatory. We are +creating a suite, creating a test case, adding the test case to the +suite, and adding the unit test we created above to the test case. Why +separate this off into a separate function, rather than inline it in +`main()'? Because any new tests will get added in `money_suite()', but +nothing will need to change in `main()' for the rest of this example, +so main will stay relatively clean and simple. + + Unit tests are internally defined as static functions. This means +that the code to add unit tests to test cases must be in the same +compilation unit as the unit tests themselves. This provides another +reason to put the creation of the test suite in a separate function: +you may later want to keep one source file per suite; defining a +uniquely named suite creation function allows you later to define a +header file giving prototypes for all the suite creation functions, and +encapsulate the details of where and how unit tests are defined behind +those functions. See the test program defined for Check itself for an +example of this strategy. + + The code in `main()' bears some explanation. We are creating a +suite runner object of type `SRunner' from the `Suite' we created in +`money_suite()'. We then run the suite, using the `CK_NORMAL' flag to +specify that we should print a summary of the run, and list any +failures that may have occurred. We capture the number of failures +that occurred during the run, and use that to decide how to return. +The `check' target created by Automake uses the return value to decide +whether the tests passed or failed. + + Now that the tests are actually being run by `check_money', we +encounter linker errors again we try out `make check'. Try it for +yourself and see. The reason is that the `money.c' implementation of +the `money.h' interface hasn't been created yet. 
Let's go with the +fastest solution possible and implement stubs for each of the functions +in `money.c': + + --- src/money.1.c 2009-09-23 20:44:00.000000000 +1000 + +++ src/money.3.c 2009-09-23 20:44:00.000000000 +1000 + @@ -0,0 +1,26 @@ + +#include <stdlib.h> + +#include "money.h" + + + +Money * + +money_create (int amount, char *currency) + +{ + + return NULL; + +} + + + +int + +money_amount (Money * m) + +{ + + return 0; + +} + + + +char * + +money_currency (Money * m) + +{ + + return NULL; + +} + + + +void + +money_free (Money * m) + +{ + + return; + +} + + Note that we `#include <stdlib.h>' to get the definition of `NULL'. +Now, the code compiles and links when we run `make check', but our unit +test fails. Still, this is progress, and we can focus on making the +test pass. + + +File: check.info, Node: SRunner Output, Prev: Creating a Suite, Up: Tutorial + +3.5 SRunner Output +================== + +The function to run tests in an `SRunner' is defined as follows: + + void srunner_run_all (SRunner * sr, enum print_output print_mode); + + This function does two things: + + 1. It runs all of the unit tests for all of the test cases defined + for all of the suites in the SRunner, and collects the results in + the SRunner + + 2. It prints the results according to the `print_mode' specified. + + For SRunners that have already been run, there is also a separate +printing function defined as follows: + + void srunner_print (SRunner *sr, enum print_output print_mode); + + The enumeration values of `print_output' defined in Check that +parameter `print_mode' can assume are as follows: + +`CK_SILENT' + Specifies that no output is to be generated. If you use this flag, + you either need to programmatically examine the SRunner object, + print separately, or use test logging (*note Test Logging::.) + +`CK_MINIMAL' + Only a summary of the test run will be printed (number run, passed, + failed, errors). 
+ +`CK_NORMAL' + Prints the summary of the run, and prints one message per failed + test. + +`CK_VERBOSE' + Prints the summary, and one message per test (passed or failed) + +`CK_ENV' + Gets the print mode from the environment variable `CK_VERBOSITY', + which can have the values "silent", "minimal", "normal", + "verbose". If the variable is not found or the value is not + recognized, the print mode is set to `CK_NORMAL'. + +`CK_SUBUNIT' + Prints running progress through the subunit + (https://launchpad.net/subunit/) test runner protocol. See + 'subunit support' under the Advanced Features section for more + information. + + With the `CK_NORMAL' flag specified in our `main()', let's rerun +make check now. As before, we get the following satisfying output: + + Running suite(s): Money + 0%: Checks: 1, Failures: 1, Errors: 0 + check_money.c:10:F:Core:test_money_create: Amount not set correctly on + creation + FAIL: check_money + ================================================== + 1 of 1 tests failed + Please report to check-devel@lists.sourceforge.net + ================================================== + + The first number in the summary line tells us that 0% of our tests +passed, and the rest of the line tells us that there was one check in +total, and of those checks, one failure and zero errors. The next line +tells us exactly where that failure occurred, and what kind of failure +it was (P for pass, F for failure, E for error). + + After that we have some higher level output generated by Automake: +the `check_money' program failed, and the bug-report address given in +`configure.ac' is printed. + + Let's implement the `money_amount' function, so that it will pass +its tests. 
We first have to create a Money structure to hold the +amount, and then implement the function to return the correct amount: + + --- src/money.3.c 2009-09-23 20:44:00.000000000 +1000 + +++ src/money.4.c 2009-09-23 20:44:00.000000000 +1000 + @@ -1,6 +1,11 @@ + #include <stdlib.h> + #include "money.h" + + +struct Money + +{ + + int amount; + +}; + + + Money * + money_create (int amount, char *currency) + { + @@ -10,7 +15,7 @@ + int + money_amount (Money * m) + { + - return 0; + + return m->amount; + } + + char * + + We will now rerun make check and... what's this? The output is now +as follows: + + Running suite(s): Money + 0%: Checks: 1, Failures: 0, Errors: 1 + check_money.c:5:E:Core:test_money_create: (after this point) Received + signal 11 (Segmentation fault) + + What does this mean? Note that we now have an error, rather than a +failure. This means that our unit test either exited early, or was +signaled. Next note that the failure message says "after this point"; +This means that somewhere after the point noted (`check_money.c', line +5) there was a problem: signal 11 (a.k.a. segmentation fault). The +last point reached is set on entry to the unit test, and after every +call to `fail_unless()', `fail()', or the special function +`mark_point()'. For example, if we wrote some test code as follows: + + stuff_that_works (); + mark_point (); + stuff_that_dies (); + + then the point returned will be that marked by `mark_point()'. + + The reason our test failed so horribly is that we haven't implemented +`money_create()' to create any `Money'. 
We'll go ahead and implement +that, the symmetric `money_free()', and `money_currency()' too, in +order to make our unit test pass again: + + --- src/money.4.c 2009-09-23 20:44:00.000000000 +1000 + +++ src/money.5.c 2009-09-23 20:44:00.000000000 +1000 + @@ -4,12 +4,21 @@ + struct Money + { + int amount; + + char *currency; + }; + + Money * + money_create (int amount, char *currency) + { + - return NULL; + + Money *m = malloc (sizeof (Money)); + + if (m == NULL) + + { + + return NULL; + + } + + + + m->amount = amount; + + m->currency = currency; + + return m; + } + + int + @@ -21,11 +30,12 @@ + char * + money_currency (Money * m) + { + - return NULL; + + return m->currency; + } + + void + money_free (Money * m) + { + + free (m); + return; + } + + +File: check.info, Node: Advanced Features, Next: Conclusion and References, Prev: Tutorial, Up: Top + +4 Advanced Features +******************* + +What you've seen so far is all you need for basic unit testing. The +features described in this section are additions to Check that make it +easier for the developer to write, run, and analyse tests. + +* Menu: + +* Running Multiple Cases:: +* No Fork Mode:: +* Test Fixtures:: +* Multiple Suites in one SRunner:: +* Testing Signal Handling and Exit Values:: +* Looping Tests:: +* Test Timeouts:: +* Determining Test Coverage:: +* Test Logging:: +* Subunit Support:: + + +File: check.info, Node: Running Multiple Cases, Next: No Fork Mode, Prev: Advanced Features, Up: Advanced Features + +4.1 Running Multiple Cases +========================== + +What happens if we pass `-1' as the `amount' in `money_create()'? What +should happen? Let's write a unit test. Since we are now testing +limits, we should also test what happens when we create `Money' where +`amount == 0'. 
Let's put these in a separate test case called "Limits" +so that `money_suite' is changed like so: + + --- tests/check_money.3.c 2009-09-23 20:44:00.000000000 +1000 + +++ tests/check_money.6.c 2009-09-23 20:44:00.000000000 +1000 + @@ -14,6 +14,23 @@ + } + END_TEST + + +START_TEST (test_money_create_neg) + +{ + + Money *m = money_create (-1, "USD"); + + fail_unless (m == NULL, + + "NULL should be returned on attempt to create with " + + "a negative amount"); + +} + +END_TEST + + + +START_TEST (test_money_create_zero) + +{ + + Money *m = money_create (0, "USD"); + + fail_unless (money_amount (m) == 0, + + "Zero is a valid amount of money"); + +} + +END_TEST + + + Suite * + money_suite (void) + { + @@ -24,6 +41,12 @@ + tcase_add_test (tc_core, test_money_create); + suite_add_tcase (s, tc_core); + + + /* Limits test case */ + + TCase *tc_limits = tcase_create ("Limits"); + + tcase_add_test (tc_limits, test_money_create_neg); + + tcase_add_test (tc_limits, test_money_create_zero); + + suite_add_tcase (s, tc_limits); + + + return s; + } + + Now we can rerun our suite, and fix the problem(s). Note that errors +in the "Core" test case will be reported as "Core", and errors in the +"Limits" test case will be reported as "Limits", giving you additional +information about where things broke. + + --- src/money.5.c 2009-09-23 20:44:00.000000000 +1000 + +++ src/money.6.c 2009-09-23 20:44:00.000000000 +1000 + @@ -10,6 +10,11 @@ + Money * + money_create (int amount, char *currency) + { + + if (amount < 0) + + { + + return NULL; + + } + + + Money *m = malloc (sizeof (Money)); + if (m == NULL) + { + + +File: check.info, Node: No Fork Mode, Next: Test Fixtures, Prev: Running Multiple Cases, Up: Advanced Features + +4.2 No Fork Mode +================ + +Check normally forks to create a separate address space. This allows a +signal or early exit to be caught and reported, rather than taking down +the entire test program, and is normally very useful. 
However, when +you are trying to debug why the segmentation fault or other program +error occurred, forking makes it difficult to use debugging tools. To +define fork mode for an `SRunner' object, you can do one of the +following: + + 1. Define the CK_FORK environment variable to equal "no". + + 2. Explicitly define the fork status through the use of the following + function: + + + void srunner_set_fork_status (SRunner * sr, enum fork_status fstat); + + The enum `fork_status' allows the `fstat' parameter to assume the +following values: `CK_FORK' and `CK_NOFORK'. An explicit call to +`srunner_set_fork_status()' overrides the `CK_FORK' environment +variable. + + +File: check.info, Node: Test Fixtures, Next: Multiple Suites in one SRunner, Prev: No Fork Mode, Up: Advanced Features + +4.3 Test Fixtures +================= + +We may want multiple tests that all use the same Money. In such cases, +rather than setting up and tearing down objects for each unit test, it +may be convenient to add some setup that is constant across all the +tests in a test case. Each such setup/teardown pair is called a "test +fixture" in test-driven development jargon. + + A fixture is created by defining a setup and/or a teardown function, +and associating it with a test case. There are two kinds of test +fixtures in Check: checked and unchecked fixtures. These are defined as +follows: + +Checked fixtures + are run inside the address space created by the fork to create the + unit test. Before each unit test in a test case, the `setup()' + function is run, if defined. After each unit test, the + `teardown()' function is run, if defined. Since they run inside + the forked address space, if checked fixtures signal or otherwise + fail, they will be caught and reported by the `SRunner'. A + checked `teardown()' fixture will run even if the unit test fails. + +Unchecked fixtures + are run in the same address space as the test program. 
Therefore + they may not signal or exit, but may use the fail functions. The + unchecked `setup()', if defined, is run before the test case is + started. The unchecked `teardown()', if defined, is run after the + test case is done. + + So for a test case that contains `check_one()' and `check_two()' +unit tests, `checked_setup()'/`checked_teardown()' checked fixtures, and +`unchecked_setup()'/`unchecked_teardown()' unchecked fixtures, the +control flow would be: + + unchecked_setup(); + fork(); + checked_setup(); + check_one(); + checked_teardown(); + wait(); + fork(); + checked_setup(); + check_two(); + checked_teardown(); + wait(); + unchecked_teardown(); + +* Menu: + +* Test Fixture Examples:: +* Checked vs Unchecked Fixtures:: + + +File: check.info, Node: Test Fixture Examples, Next: Checked vs Unchecked Fixtures, Prev: Test Fixtures, Up: Test Fixtures + +4.3.1 Test Fixture Examples +--------------------------- + +We create a test fixture in Check as follows: + + 1. Define global variables, and functions to setup and teardown the + globals. The functions both take `void' and return `void'. In + our example, we'll make `five_dollars' be a global created and + freed by `setup()' and `teardown()' respectively. + + 2. Add the `setup()' and `teardown()' functions to the test case with + `tcase_add_checked_fixture()'. In our example, this belongs in + the suite setup function `money_suite'. + + 3. Rewrite tests to use the globals. We'll rewrite our first to use + `five_dollars'. + + Note that the functions used for setup and teardown do not need to be +named `setup()' and `teardown()', but they must take `void' and return +`void'. 
We'll update `check_money.c' as follows: + + --- tests/check_money.6.c 2009-09-23 20:44:00.000000000 +1000 + +++ tests/check_money.7.c 2009-09-23 20:44:00.000000000 +1000 + @@ -2,15 +2,26 @@ + #include <check.h> + #include "../src/money.h" + + +Money *five_dollars; + + + +void + +setup (void) + +{ + + five_dollars = money_create (5, "USD"); + +} + + + +void + +teardown (void) + +{ + + money_free (five_dollars); + +} + + + START_TEST (test_money_create) + { + - Money *m; + - m = money_create (5, "USD"); + - fail_unless (money_amount (m) == 5, + + fail_unless (money_amount (five_dollars) == 5, + "Amount not set correctly on creation"); + - fail_unless (strcmp (money_currency (m), "USD") == 0, + + fail_unless (strcmp (money_currency (five_dollars), "USD") == 0, + "Currency not set correctly on creation"); + - money_free (m); + } + END_TEST + + @@ -38,6 +49,7 @@ + + /* Core test case */ + TCase *tc_core = tcase_create ("Core"); + + tcase_add_checked_fixture (tc_core, setup, teardown); + tcase_add_test (tc_core, test_money_create); + suite_add_tcase (s, tc_core); + + +File: check.info, Node: Checked vs Unchecked Fixtures, Prev: Test Fixture Examples, Up: Test Fixtures + +4.3.2 Checked vs Unchecked Fixtures +----------------------------------- + +Checked fixtures run once for each unit test in a test case, and so +they should not be used for expensive setup. However, if a checked +fixture fails and `CK_FORK' mode is being used, it will not bring down +the entire framework. + + On the other hand, unchecked fixtures run once for an entire test +case, as opposed to once per unit test, and so can be used for +expensive setup. However, since they may take down the entire test +program, they should only be used if they are known to be safe. + + Additionally, the isolation of objects created by unchecked fixtures +is not guaranteed by `CK_NOFORK' mode. 
Normally, in `CK_FORK' mode, +unit tests may abuse the objects created in an unchecked fixture with +impunity, without affecting other unit tests in the same test case, +because the fork creates a separate address space. However, in +`CK_NOFORK' mode, all tests live in the same address space, and side +effects in one test will affect the unchecked fixture for the other +tests. + + A checked fixture will generally not be affected by unit test side +effects, since the `setup()' is run before each unit test. There is an +exception for side effects to the total environment in which the test +program lives: for example, if the `setup()' function initializes a +file that a unit test then changes, the combination of the `teardown()' +function and `setup()' function must be able to restore the environment +for the next unit test. + + If the `setup()' function in a fixture fails, in either checked or +unchecked fixtures, the unit tests for the test case, and the +`teardown()' function for the fixture will not be run. A fixture error +will be created and reported to the `SRunner'. + + +File: check.info, Node: Multiple Suites in one SRunner, Next: Testing Signal Handling and Exit Values, Prev: Test Fixtures, Up: Advanced Features + +4.4 Multiple Suites in one SRunner +================================== + +In a large program, it will be convenient to create multiple suites, +each testing a module of the program. While one can create several +test programs, each running one `Suite', it may be convenient to create +one main test program, and use it to run multiple suites. The Check +test suite provides an example of how to do this. 
The main testing +program is called `check_check', and has a header file that declares +suite creation functions for all the module tests: + + Suite *make_sub_suite (void); + Suite *make_sub2_suite (void); + Suite *make_master_suite (void); + Suite *make_list_suite (void); + Suite *make_msg_suite (void); + Suite *make_log_suite (void); + Suite *make_limit_suite (void); + Suite *make_fork_suite (void); + Suite *make_fixture_suite (void); + Suite *make_pack_suite (void); + + The function `srunner_add_suite()' is used to add additional suites +to an `SRunner'. Here is the code that sets up and runs the `SRunner' +in the `main()' function in `check_check_main.c': + + SRunner *sr; + sr = srunner_create (make_master_suite ()); + srunner_add_suite (sr, make_list_suite ()); + srunner_add_suite (sr, make_msg_suite ()); + srunner_add_suite (sr, make_log_suite ()); + srunner_add_suite (sr, make_limit_suite ()); + srunner_add_suite (sr, make_fork_suite ()); + srunner_add_suite (sr, make_fixture_suite ()); + srunner_add_suite (sr, make_pack_suite ()); + + +File: check.info, Node: Testing Signal Handling and Exit Values, Next: Looping Tests, Prev: Multiple Suites in one SRunner, Up: Advanced Features + +4.5 Testing Signal Handling and Exit Values +=========================================== + +To enable testing of signal handling, there is a function +`tcase_add_test_raise_signal()' which is used instead of +`tcase_add_test()'. This function takes an additional signal argument, +specifying a signal that the test expects to receive. If no signal is +received this is logged as a failure. If a different signal is +received this is logged as an error. + + The signal handling functionality only works in CK_FORK mode. + + To enable testing of expected exits, there is a function +`tcase_add_exit_test()' which is used instead of `tcase_add_test()'. +This function takes an additional expected exit value argument, +specifying a value that the test is expected to exit with. 
If the test +exits with any other value this is logged as a failure. If the test +exits early this is logged as an error. + + The exit handling functionality only works in CK_FORK mode. + + +File: check.info, Node: Looping Tests, Next: Test Timeouts, Prev: Testing Signal Handling and Exit Values, Up: Advanced Features + +4.6 Looping Tests +================= + +Looping tests are tests that are called with a new context for each +loop iteration. This makes them ideal for table based tests. If loops +are used inside ordinary tests to test multiple values, only the first +error will be shown before the test exits. However, looping tests +allow for all errors to be shown at once, which can help out with +debugging. + + Adding a normal test with `tcase_add_loop_test()' instead of +`tcase_add_test()' will make the test function the body of a `for' +loop, with the addition of a fork before each call. The loop variable +`_i' is available for use inside the test function; for example, it +could serve as an index into a table. For failures, the iteration +which caused the failure is available in error messages and logs. + + Start and end values for the loop are supplied when adding the test. +The values are used as in a normal `for' loop. Below is some +pseudo-code to show the concept: + + for (_i = tfun->loop_start; _i < tfun->loop_end; _i++) + { + fork(); /* New context */ + tfun->f(_i); /* Call test function */ + wait(); /* Wait for child to terminate */ + } + + An example of looping test usage follows: + + static const int primes[5] = {2,3,5,7,11}; + + START_TEST (check_is_prime) + { + fail_unless (is_prime (primes[_i])); + } + END_TEST + + ... + + tcase_add_loop_test (tcase, check_is_prime, 0, 5); + + Looping tests work in `CK_NOFORK' mode as well, but without the +forking. This means that only the first error will be shown. 
+ + +File: check.info, Node: Test Timeouts, Next: Determining Test Coverage, Prev: Looping Tests, Up: Advanced Features + +4.7 Test Timeouts +================= + +To be certain that a test won't hang indefinitely, all tests are run +with a timeout, the default being 4 seconds. If the test is not +finished within that time, it is killed and logged as an error. + + The timeout for a specific test case, which may contain multiple unit +tests, can be changed with the `tcase_set_timeout()' function. The +default timeout used for all test cases can be changed with the +environment variable `CK_DEFAULT_TIMEOUT', but this will not override +an explicitly set timeout. Another way to change the timeout length is +to use the `CK_TIMEOUT_MULTIPLIER' environment variable, which +multiplies all timeouts, including those set with +`tcase_set_timeout()', with the supplied integer value. All timeout +arguments are in seconds and a timeout of 0 seconds turns off the +timeout functionality. + + Test timeouts are only available in CK_FORK mode. + + +File: check.info, Node: Determining Test Coverage, Next: Test Logging, Prev: Test Timeouts, Up: Advanced Features + +4.8 Determining Test Coverage +============================= + +The term "code coverage" refers to the extent that the statements of a +program are executed during a run. Thus, "test coverage" refers to +code coverage when executing unit tests. This information can help you +to do two things: + + * Write better tests that more fully exercise your code, thereby + improving confidence in it. + + * Detect dead code that could be factored away. + + Check itself does not provide any means to determine this test +coverage; rather, this is the job of the compiler and its related +tools. In the case of `gcc' this information is easy to obtain, and +other compilers should provide similar facilities. 
+ + Using `gcc', first enable test coverage profiling when building your +source by specifying the `-fprofile-arcs' and `-ftest-coverage' +switches: + + $ gcc -g -Wall -fprofile-arcs -ftest-coverage -o foo foo.c foo_check.c + + You will see that an additional `.gcno' file is created for each +`.c' input file. After running your tests the normal way, a `.gcda' +file is created for each `.gcno' file. These contain the coverage data +in a raw format. To combine this information and a source file into a +more readable format you can use the `gcov' utility: + + $ gcov foo.c + + This will produce the file `foo.c.gcov' which looks like this: + + -: 41: * object */ + 18: 42: if (ht->table[p] != NULL) { + -: 43: /* replaces the current entry */ + #####: 44: ht->count--; + #####: 45: ht->size -= ht->table[p]->size + + #####: 46: sizeof(struct hashtable_entry); + + As you can see this is an annotated source file with three columns: +usage information, line numbers, and the original source. The usage +information in the first column can either be '-', which means that +this line does not contain code that could be executed; '#####', which +means this line was never executed although it does contain code--these +are the lines that are probably most interesting for you; or a number, +which indicates how often that line was executed. + + This is of course only a very brief overview, but it should +illustrate how determining test coverage generally works, and how it +can help you. For more information or help with other compilers, +please refer to the relevant manuals. + + +File: check.info, Node: Test Logging, Next: Subunit Support, Prev: Determining Test Coverage, Up: Advanced Features + +4.9 Test Logging +================ + +Check supports an operation to log the results of a test run. 
To use +test logging, call the `srunner_set_log()' function with the name of +the log file you wish to create: + + SRunner *sr; + sr = srunner_create (make_s1_suite ()); + srunner_add_suite (sr, make_s2_suite ()); + srunner_set_log (sr, "test.log"); + srunner_run_all (sr, CK_NORMAL); + + In this example, Check will write the results of the run to +`test.log'. The `print_mode' argument to `srunner_run_all()' is +ignored during test logging; the log will contain a result entry, +organized by suite, for every test run. Here is an example of test log +output: + + Running suite S1 + ex_log_output.c:8:P:Core:test_pass: Test passed + ex_log_output.c:14:F:Core:test_fail: Failure + ex_log_output.c:18:E:Core:test_exit: (after this point) Early exit + with return value 1 + Running suite S2 + ex_log_output.c:26:P:Core:test_pass2: Test passed + Results for all suites run: + 50%: Checks: 4, Failures: 1, Errors: 1 + +* Menu: + +* XML Logging:: + + +File: check.info, Node: XML Logging, Prev: Test Logging, Up: Test Logging + +4.9.1 XML Logging +----------------- + +The log can also be written in XML. The following functions define the +interface for XML logs: + + void srunner_set_xml (SRunner *sr, const char *fname); + int srunner_has_xml (SRunner *sr); + const char *srunner_xml_fname (SRunner *sr); + + The only thing you need to do to get XML output is call +`srunner_set_xml()' before the tests are run. 
Here is an example of +the same log output as before but in XML: + + <?xml version="1.0"?> + <testsuites xmlns="http://check.sourceforge.net/ns"> + <datetime>2004-08-20 12:53:32</datetime> + <suite> + <title>S1</title> + <test result="success"> + <path>.</path> + <fn>ex_xml_output.c:8</fn> + <id>test_pass</id> + <description>Core</description> + <message>Passed</message> + </test> + <test result="failure"> + <path>.</path> + <fn>ex_xml_output.c:14</fn> + <id>test_fail</id> + <description>Core</description> + <message>Failure</message> + </test> + <test result="error"> + <path>.</path> + <fn>ex_xml_output.c:18</fn> + <id>test_exit</id> + <description>Core</description> + <message>Early exit with return value 1</message> + </test> + </suite> + <suite> + <title>S2</title> + <test result="success"> + <path>.</path> + <fn>ex_xml_output.c:26</fn> + <id>test_pass2</id> + <description>Core</description> + <message>Passed</message> + </test> + </suite> + <duration>0.304875</duration> + </testsuites> + + +File: check.info, Node: Subunit Support, Prev: Test Logging, Up: Advanced Features + +4.10 Subunit Support +==================== + +Check supports running test suites with subunit output. This can be +useful to combine test results from multiple languages, or to perform +programmatic analysis on the results of multiple check test suites or +otherwise handle test results in a programmatic manner. Using subunit +with check is very straightforward. There are two steps: 1) In your +check test suite driver pass 'CK_SUBUNIT' as the output mode for your +srunner. + + SRunner *sr; + sr = srunner_create (make_s1_suite ()); + srunner_add_suite (sr, make_s2_suite ()); + srunner_run_all (sr, CK_SUBUNIT); + 2) Setup your main language test runner to run your check-based test +executable. 
For instance using python: + + + import subunit + + class ShellTests(subunit.ExecTestCase): + """Run some tests from the C codebase.""" + + def test_group_one(self): + """./foo/check_driver""" + + def test_group_two(self): + """./foo/other_driver""" + + In this example, running the test suite ShellTests in python (using +any test runner - unittest.py, tribunal, trial, nose or others) will run +./foo/check_driver and ./foo/other_driver and report on their result. + + Subunit is hosted on launchpad - the subunit +(https://launchpad.net/subunit/) project there contains bug tracker, +future plans, and source code control details. + + +File: check.info, Node: Conclusion and References, Next: AM_PATH_CHECK, Prev: Advanced Features, Up: Top + +5 Conclusion and References +*************************** + +The tutorial and description of advanced features has provided an +introduction to all of the functionality available in Check. +Hopefully, this is enough to get you started writing unit tests with +Check. All the rest is simply application of what has been learned so +far with repeated application of the "test a little, code a little" +strategy. + + For further reference, see Kent Beck, "Test-Driven Development: By +Example", 1st ed., Addison-Wesley, 2003. ISBN 0-321-14653-0. + + If you know of other authoritative references to unit testing and +test-driven development, please send us a patch to this manual. + + +File: check.info, Node: AM_PATH_CHECK, Next: Copying This Manual, Prev: Conclusion and References, Up: Top + +6 AM_PATH_CHECK +*************** + +The `AM_PATH_CHECK()' macro is defined in the file `check.m4' which is +installed by Check. It has some optional parameters that you might +find useful in your `configure.ac': + +AM_PATH_CHECK([MINIMUM-VERSION, + [ACTION-IF-FOUND[,ACTION-IF-NOT-FOUND]]]) + + `AM_PATH_CHECK' does several things: + + 1. It ensures check.h is available + + 2. It ensures a compatible version of Check is installed + + 3. 
It sets `CHECK_CFLAGS' and `CHECK_LIBS' for use by Automake. + + If you include `AM_PATH_CHECK()' in `configure.ac' and subsequently +see warnings when attempting to create `configure', it probably means +one of the following things: + + 1. You forgot to call `aclocal'. `autoreconf' will do this for you. + + 2. `aclocal' can't find `check.m4'. Here are some possible + solutions: + + a. Call `aclocal' with `-I' set to the location of `check.m4'. + This means you have to call both `aclocal' and `autoreconf'. + + b. Add the location of `check.m4' to the `dirlist' used by + `aclocal' and then call `autoreconf'. This means you need + permission to modify the `dirlist'. + + c. Set `ACLOCAL_AMFLAGS' in your top-level `Makefile.am' to + include `-I DIR' with `DIR' being the location of `check.m4'. + Then call `autoreconf'. + + +File: check.info, Node: Copying This Manual, Next: Index, Prev: AM_PATH_CHECK, Up: Top + +Appendix A Copying This Manual +****************************** + +* Menu: + +* GNU Free Documentation License:: License for copying this manual. + + +File: check.info, Node: GNU Free Documentation License, Up: Copying This Manual + +A.1 GNU Free Documentation License +================================== + + Version 1.2, November 2002 + + Copyright (C) 2000,2001,2002 Free Software Foundation, Inc. + 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + 0. PREAMBLE + + The purpose of this License is to make a manual, textbook, or other + functional and useful document "free" in the sense of freedom: to + assure everyone the effective freedom to copy and redistribute it, + with or without modifying it, either commercially or + noncommercially. Secondarily, this License preserves for the + author and publisher a way to get credit for their work, while not + being considered responsible for modifications made by others. 
+ + This License is a kind of "copyleft", which means that derivative + works of the document must themselves be free in the same sense. + It complements the GNU General Public License, which is a copyleft + license designed for free software. + + We have designed this License in order to use it for manuals for + free software, because free software needs free documentation: a + free program should come with manuals providing the same freedoms + that the software does. But this License is not limited to + software manuals; it can be used for any textual work, regardless + of subject matter or whether it is published as a printed book. + We recommend this License principally for works whose purpose is + instruction or reference. + + 1. APPLICABILITY AND DEFINITIONS + + This License applies to any manual or other work, in any medium, + that contains a notice placed by the copyright holder saying it + can be distributed under the terms of this License. Such a notice + grants a world-wide, royalty-free license, unlimited in duration, + to use that work under the conditions stated herein. The + "Document", below, refers to any such manual or work. Any member + of the public is a licensee, and is addressed as "you". You + accept the license if you copy, modify or distribute the work in a + way requiring permission under copyright law. + + A "Modified Version" of the Document means any work containing the + Document or a portion of it, either copied verbatim, or with + modifications and/or translated into another language. + + A "Secondary Section" is a named appendix or a front-matter section + of the Document that deals exclusively with the relationship of the + publishers or authors of the Document to the Document's overall + subject (or to related matters) and contains nothing that could + fall directly within that overall subject. (Thus, if the Document + is in part a textbook of mathematics, a Secondary Section may not + explain any mathematics.) 
The relationship could be a matter of + historical connection with the subject or with related matters, or + of legal, commercial, philosophical, ethical or political position + regarding them. + + The "Invariant Sections" are certain Secondary Sections whose + titles are designated, as being those of Invariant Sections, in + the notice that says that the Document is released under this + License. If a section does not fit the above definition of + Secondary then it is not allowed to be designated as Invariant. + The Document may contain zero Invariant Sections. If the Document + does not identify any Invariant Sections then there are none. + + The "Cover Texts" are certain short passages of text that are + listed, as Front-Cover Texts or Back-Cover Texts, in the notice + that says that the Document is released under this License. A + Front-Cover Text may be at most 5 words, and a Back-Cover Text may + be at most 25 words. + + A "Transparent" copy of the Document means a machine-readable copy, + represented in a format whose specification is available to the + general public, that is suitable for revising the document + straightforwardly with generic text editors or (for images + composed of pixels) generic paint programs or (for drawings) some + widely available drawing editor, and that is suitable for input to + text formatters or for automatic translation to a variety of + formats suitable for input to text formatters. A copy made in an + otherwise Transparent file format whose markup, or absence of + markup, has been arranged to thwart or discourage subsequent + modification by readers is not Transparent. An image format is + not Transparent if used for any substantial amount of text. A + copy that is not "Transparent" is called "Opaque". 
+ + Examples of suitable formats for Transparent copies include plain + ASCII without markup, Texinfo input format, LaTeX input format, + SGML or XML using a publicly available DTD, and + standard-conforming simple HTML, PostScript or PDF designed for + human modification. Examples of transparent image formats include + PNG, XCF and JPG. Opaque formats include proprietary formats that + can be read and edited only by proprietary word processors, SGML or + XML for which the DTD and/or processing tools are not generally + available, and the machine-generated HTML, PostScript or PDF + produced by some word processors for output purposes only. + + The "Title Page" means, for a printed book, the title page itself, + plus such following pages as are needed to hold, legibly, the + material this License requires to appear in the title page. For + works in formats which do not have any title page as such, "Title + Page" means the text near the most prominent appearance of the + work's title, preceding the beginning of the body of the text. + + A section "Entitled XYZ" means a named subunit of the Document + whose title either is precisely XYZ or contains XYZ in parentheses + following text that translates XYZ in another language. (Here XYZ + stands for a specific section name mentioned below, such as + "Acknowledgements", "Dedications", "Endorsements", or "History".) + To "Preserve the Title" of such a section when you modify the + Document means that it remains a section "Entitled XYZ" according + to this definition. + + The Document may include Warranty Disclaimers next to the notice + which states that this License applies to the Document. These + Warranty Disclaimers are considered to be included by reference in + this License, but only as regards disclaiming warranties: any other + implication that these Warranty Disclaimers may have is void and + has no effect on the meaning of this License. + + 2. 
VERBATIM COPYING + + You may copy and distribute the Document in any medium, either + commercially or noncommercially, provided that this License, the + copyright notices, and the license notice saying this License + applies to the Document are reproduced in all copies, and that you + add no other conditions whatsoever to those of this License. You + may not use technical measures to obstruct or control the reading + or further copying of the copies you make or distribute. However, + you may accept compensation in exchange for copies. If you + distribute a large enough number of copies you must also follow + the conditions in section 3. + + You may also lend copies, under the same conditions stated above, + and you may publicly display copies. + + 3. COPYING IN QUANTITY + + If you publish printed copies (or copies in media that commonly + have printed covers) of the Document, numbering more than 100, and + the Document's license notice requires Cover Texts, you must + enclose the copies in covers that carry, clearly and legibly, all + these Cover Texts: Front-Cover Texts on the front cover, and + Back-Cover Texts on the back cover. Both covers must also clearly + and legibly identify you as the publisher of these copies. The + front cover must present the full title with all words of the + title equally prominent and visible. You may add other material + on the covers in addition. Copying with changes limited to the + covers, as long as they preserve the title of the Document and + satisfy these conditions, can be treated as verbatim copying in + other respects. + + If the required texts for either cover are too voluminous to fit + legibly, you should put the first ones listed (as many as fit + reasonably) on the actual cover, and continue the rest onto + adjacent pages. 
+ + If you publish or distribute Opaque copies of the Document + numbering more than 100, you must either include a + machine-readable Transparent copy along with each Opaque copy, or + state in or with each Opaque copy a computer-network location from + which the general network-using public has access to download + using public-standard network protocols a complete Transparent + copy of the Document, free of added material. If you use the + latter option, you must take reasonably prudent steps, when you + begin distribution of Opaque copies in quantity, to ensure that + this Transparent copy will remain thus accessible at the stated + location until at least one year after the last time you + distribute an Opaque copy (directly or through your agents or + retailers) of that edition to the public. + + It is requested, but not required, that you contact the authors of + the Document well before redistributing any large number of + copies, to give them a chance to provide you with an updated + version of the Document. + + 4. MODIFICATIONS + + You may copy and distribute a Modified Version of the Document + under the conditions of sections 2 and 3 above, provided that you + release the Modified Version under precisely this License, with + the Modified Version filling the role of the Document, thus + licensing distribution and modification of the Modified Version to + whoever possesses a copy of it. In addition, you must do these + things in the Modified Version: + + A. Use in the Title Page (and on the covers, if any) a title + distinct from that of the Document, and from those of + previous versions (which should, if there were any, be listed + in the History section of the Document). You may use the + same title as a previous version if the original publisher of + that version gives permission. + + B. 
List on the Title Page, as authors, one or more persons or + entities responsible for authorship of the modifications in + the Modified Version, together with at least five of the + principal authors of the Document (all of its principal + authors, if it has fewer than five), unless they release you + from this requirement. + + C. State on the Title page the name of the publisher of the + Modified Version, as the publisher. + + D. Preserve all the copyright notices of the Document. + + E. Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + + F. Include, immediately after the copyright notices, a license + notice giving the public permission to use the Modified + Version under the terms of this License, in the form shown in + the Addendum below. + + G. Preserve in that license notice the full lists of Invariant + Sections and required Cover Texts given in the Document's + license notice. + + H. Include an unaltered copy of this License. + + I. Preserve the section Entitled "History", Preserve its Title, + and add to it an item stating at least the title, year, new + authors, and publisher of the Modified Version as given on + the Title Page. If there is no section Entitled "History" in + the Document, create one stating the title, year, authors, + and publisher of the Document as given on its Title Page, + then add an item describing the Modified Version as stated in + the previous sentence. + + J. Preserve the network location, if any, given in the Document + for public access to a Transparent copy of the Document, and + likewise the network locations given in the Document for + previous versions it was based on. These may be placed in + the "History" section. You may omit a network location for a + work that was published at least four years before the + Document itself, or if the original publisher of the version + it refers to gives permission. + + K. 
For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the + section all the substance and tone of each of the contributor + acknowledgements and/or dedications given therein. + + L. Preserve all the Invariant Sections of the Document, + unaltered in their text and in their titles. Section numbers + or the equivalent are not considered part of the section + titles. + + M. Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + + N. Do not retitle any existing section to be Entitled + "Endorsements" or to conflict in title with any Invariant + Section. + + O. Preserve any Warranty Disclaimers. + + If the Modified Version includes new front-matter sections or + appendices that qualify as Secondary Sections and contain no + material copied from the Document, you may at your option + designate some or all of these sections as invariant. To do this, + add their titles to the list of Invariant Sections in the Modified + Version's license notice. These titles must be distinct from any + other section titles. + + You may add a section Entitled "Endorsements", provided it contains + nothing but endorsements of your Modified Version by various + parties--for example, statements of peer review or that the text + has been approved by an organization as the authoritative + definition of a standard. + + You may add a passage of up to five words as a Front-Cover Text, + and a passage of up to 25 words as a Back-Cover Text, to the end + of the list of Cover Texts in the Modified Version. Only one + passage of Front-Cover Text and one of Back-Cover Text may be + added by (or through arrangements made by) any one entity. 
If the + Document already includes a cover text for the same cover, + previously added by you or by arrangement made by the same entity + you are acting on behalf of, you may not add another; but you may + replace the old one, on explicit permission from the previous + publisher that added the old one. + + The author(s) and publisher(s) of the Document do not by this + License give permission to use their names for publicity for or to + assert or imply endorsement of any Modified Version. + + 5. COMBINING DOCUMENTS + + You may combine the Document with other documents released under + this License, under the terms defined in section 4 above for + modified versions, provided that you include in the combination + all of the Invariant Sections of all of the original documents, + unmodified, and list them all as Invariant Sections of your + combined work in its license notice, and that you preserve all + their Warranty Disclaimers. + + The combined work need only contain one copy of this License, and + multiple identical Invariant Sections may be replaced with a single + copy. If there are multiple Invariant Sections with the same name + but different contents, make the title of each such section unique + by adding at the end of it, in parentheses, the name of the + original author or publisher of that section if known, or else a + unique number. Make the same adjustment to the section titles in + the list of Invariant Sections in the license notice of the + combined work. + + In the combination, you must combine any sections Entitled + "History" in the various original documents, forming one section + Entitled "History"; likewise combine any sections Entitled + "Acknowledgements", and any sections Entitled "Dedications". You + must delete all sections Entitled "Endorsements." + + 6. 
COLLECTIONS OF DOCUMENTS + + You may make a collection consisting of the Document and other + documents released under this License, and replace the individual + copies of this License in the various documents with a single copy + that is included in the collection, provided that you follow the + rules of this License for verbatim copying of each of the + documents in all other respects. + + You may extract a single document from such a collection, and + distribute it individually under this License, provided you insert + a copy of this License into the extracted document, and follow + this License in all other respects regarding verbatim copying of + that document. + + 7. AGGREGATION WITH INDEPENDENT WORKS + + A compilation of the Document or its derivatives with other + separate and independent documents or works, in or on a volume of + a storage or distribution medium, is called an "aggregate" if the + copyright resulting from the compilation is not used to limit the + legal rights of the compilation's users beyond what the individual + works permit. When the Document is included in an aggregate, this + License does not apply to the other works in the aggregate which + are not themselves derivative works of the Document. + + If the Cover Text requirement of section 3 is applicable to these + copies of the Document, then if the Document is less than one half + of the entire aggregate, the Document's Cover Texts may be placed + on covers that bracket the Document within the aggregate, or the + electronic equivalent of covers if the Document is in electronic + form. Otherwise they must appear on printed covers that bracket + the whole aggregate. + + 8. TRANSLATION + + Translation is considered a kind of modification, so you may + distribute translations of the Document under the terms of section + 4. 
Replacing Invariant Sections with translations requires special + permission from their copyright holders, but you may include + translations of some or all Invariant Sections in addition to the + original versions of these Invariant Sections. You may include a + translation of this License, and all the license notices in the + Document, and any Warranty Disclaimers, provided that you also + include the original English version of this License and the + original versions of those notices and disclaimers. In case of a + disagreement between the translation and the original version of + this License or a notice or disclaimer, the original version will + prevail. + + If a section in the Document is Entitled "Acknowledgements", + "Dedications", or "History", the requirement (section 4) to + Preserve its Title (section 1) will typically require changing the + actual title. + + 9. TERMINATION + + You may not copy, modify, sublicense, or distribute the Document + except as expressly provided for under this License. Any other + attempt to copy, modify, sublicense or distribute the Document is + void, and will automatically terminate your rights under this + License. However, parties who have received copies, or rights, + from you under this License will not have their licenses + terminated so long as such parties remain in full compliance. + + 10. FUTURE REVISIONS OF THIS LICENSE + + The Free Software Foundation may publish new, revised versions of + the GNU Free Documentation License from time to time. Such new + versions will be similar in spirit to the present version, but may + differ in detail to address new problems or concerns. See + `http://www.gnu.org/copyleft/'. + + Each version of the License is given a distinguishing version + number. 
If the Document specifies that a particular numbered + version of this License "or any later version" applies to it, you + have the option of following the terms and conditions either of + that specified version or of any later version that has been + published (not as a draft) by the Free Software Foundation. If + the Document does not specify a version number of this License, + you may choose any version ever published (not as a draft) by the + Free Software Foundation. + +A.1.1 ADDENDUM: How to use this License for your documents +---------------------------------------------------------- + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and license +notices just after the title page: + + Copyright (C) YEAR YOUR NAME. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover + Texts. A copy of the license is included in the section entitled ``GNU + Free Documentation License''. + + If you have Invariant Sections, Front-Cover Texts and Back-Cover +Texts, replace the "with...Texts." line with this: + + with the Invariant Sections being LIST THEIR TITLES, with + the Front-Cover Texts being LIST, and with the Back-Cover Texts + being LIST. + + If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + + If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, to +permit their use in free software. + + +File: check.info, Node: Index, Prev: Copying This Manual, Up: Top + +Index +***** + + +* Menu: + +* AM_PATH_CHECK(): AM_PATH_CHECK. 
(line 6) +* CK_DEFAULT_TIMEOUT: Test Timeouts. (line 6) +* CK_ENV: SRunner Output. (line 42) +* CK_FORK: No Fork Mode. (line 14) +* CK_MINIMAL: SRunner Output. (line 31) +* CK_NORMAL: SRunner Output. (line 35) +* CK_SILENT: SRunner Output. (line 26) +* CK_SUBUNIT: SRunner Output. (line 48) +* CK_TIMEOUT_MULTIPLIER: Test Timeouts. (line 6) +* CK_VERBOSE: SRunner Output. (line 39) +* CK_VERBOSITY: SRunner Output. (line 42) +* fail(): Test a Little. (line 49) +* fail_if(): Test a Little. (line 59) +* fail_unless(): Test a Little. (line 40) +* FDL, GNU Free Documentation License: GNU Free Documentation License. + (line 6) +* frameworks: Other Frameworks for C. + (line 6) +* introduction: Introduction. (line 6) +* mark_point(): SRunner Output. (line 113) +* other frameworks: Other Frameworks for C. + (line 6) +* srunner_add_suite(): Multiple Suites in one SRunner. + (line 25) +* srunner_has_xml(): XML Logging. (line 6) +* srunner_run_all(): SRunner Output. (line 6) +* srunner_set_fork_status(): No Fork Mode. (line 14) +* srunner_set_log(): Test Logging. (line 6) +* srunner_set_xml(): XML Logging. (line 6) +* srunner_xml_fname(): XML Logging. (line 6) +* tcase_add_checked_fixture(): Test Fixture Examples. + (line 13) +* tcase_add_exit_test(): Testing Signal Handling and Exit Values. + (line 15) +* tcase_add_loop_test(): Looping Tests. (line 13) +* tcase_add_test_raise_signal(): Testing Signal Handling and Exit Values. + (line 6) +* tcase_set_timeout(): Test Timeouts. 
(line 6) + + + +Tag Table: +Node: Top768 +Node: Introduction2573 +Node: Unit Testing in C4709 +Node: Other Frameworks for C6478 +Node: Tutorial10265 +Node: How to Write a Test11040 +Node: Setting Up the Money Build11691 +Node: Test a Little16919 +Node: Creating a Suite21486 +Node: SRunner Output25671 +Node: Advanced Features31125 +Node: Running Multiple Cases31727 +Node: No Fork Mode33950 +Node: Test Fixtures34966 +Node: Test Fixture Examples37033 +Node: Checked vs Unchecked Fixtures39195 +Node: Multiple Suites in one SRunner41059 +Node: Testing Signal Handling and Exit Values42719 +Node: Looping Tests43826 +Node: Test Timeouts45538 +Node: Determining Test Coverage46568 +Node: Test Logging49044 +Node: XML Logging50241 +Node: Subunit Support52006 +Node: Conclusion and References53472 +Node: AM_PATH_CHECK54254 +Node: Copying This Manual55694 +Node: GNU Free Documentation License55930 +Node: Index78339 + +End Tag Table diff --git a/doc/check.texi b/doc/check.texi new file mode 100644 index 0000000..22d419e --- /dev/null +++ b/doc/check.texi @@ -0,0 +1,1422 @@ +\input texinfo @c -*-texinfo-*- +@c %**start of header +@setfilename check.info +@include version.texi +@settitle Check @value{VERSION} +@syncodeindex fn cp +@syncodeindex tp cp +@syncodeindex vr cp +@c %**end of header + +@copying +This manual is for Check +(version @value{VERSION}, @value{UPDATED}), +a unit testing framework for C. + +Copyright @copyright{} 2001--2009 Arien Malec, Chris Pickett, Fredrik +Hugosson, and Robert Lemmen. + +@quotation +Permission is granted to copy, distribute and/or modify this document +under the terms of the @acronym{GNU} Free Documentation License, +Version 1.2 or any later version published by the Free Software +Foundation; with no Invariant Sections, no Front-Cover texts, and no +Back-Cover Texts. 
A copy of the license is included in the section +entitled ``@acronym{GNU} Free Documentation License.'' +@end quotation +@end copying + +@dircategory Software development +@direntry +* Check: (check)Introduction. +@end direntry + +@titlepage +@title Check +@subtitle A Unit Testing Framework for C +@subtitle for version @value{VERSION}, @value{UPDATED} +@author Arien Malec +@author Chris Pickett +@author Fredrik Hugosson +@author Robert Lemmen +@author Robert Collins + +@c The following two commands start the copyright page. +@page +@vskip 0pt plus 1filll +@insertcopying +@end titlepage + +@c Output the table of contents at the beginning. +@contents + +@ifnottex +@node Top, Introduction, (dir), (dir) +@top Check + +@insertcopying + +Please send corrections to this manual to +@email{check-devel AT lists.sourceforge.net}. We'd prefer it if you can +send a unified diff (@command{diff -u}) against the +@file{doc/check.texi} file that ships with Check, but something is +still better than nothing if you can't manage that. +@end ifnottex + +@menu +* Introduction:: +* Unit Testing in C:: +* Tutorial:: +* Advanced Features:: +* Conclusion and References:: +* AM_PATH_CHECK:: +* Copying This Manual:: +* Index:: + +@detailmenu + --- The Detailed Node Listing --- + +Unit Testing in C + +* Other Frameworks for C:: + +Tutorial: Basic Unit Testing + +* How to Write a Test:: +* Setting Up the Money Build:: +* Test a Little:: +* Creating a Suite:: +* SRunner Output:: + +Advanced Features + +* Running Multiple Cases:: +* No Fork Mode:: +* Test Fixtures:: +* Multiple Suites in one SRunner:: +* Testing Signal Handling and Exit Values:: +* Looping Tests:: +* Test Timeouts:: +* Determining Test Coverage:: +* Test Logging:: +* Subunit Support:: + +Test Fixtures + +* Test Fixture Examples:: +* Checked vs Unchecked Fixtures:: + +Test Logging + +* XML Logging:: + +Copying This Manual + +* GNU Free Documentation License:: License for copying this manual. 
+ +@end detailmenu +@end menu + +@node Introduction, Unit Testing in C, Top, Top +@chapter Introduction +@cindex introduction + +Check is a unit testing framework for C. It was inspired by similar +frameworks that currently exist for most programming languages; the +most famous example being @uref{http://www.junit.org, JUnit} for Java. +There is a list of unit test frameworks for multiple languages at +@uref{http://www.xprogramming.com/software.htm}. Unit testing has a +long history as part of formal quality assurance methodologies, but +has recently been associated with the lightweight methodology called +Extreme Programming. In that methodology, the characteristic practice +involves interspersing unit test writing with coding (``test a +little, code a little''). While the incremental unit test/code +approach is indispensable to Extreme Programming, it is also +applicable, and perhaps indispensable, outside of that methodology. + +The incremental test/code approach provides three main benefits to the +developer: + +@enumerate +@item +Because the unit tests use the interface to the unit being tested, +they allow the developer to think about how the interface should be +designed for usage early in the coding process. + +@item +They help the developer think early about aberrant cases, and code +accordingly. + +@item +By providing a documented level of correctness, they allow the +developer to refactor (see @uref{http://www.refactoring.com}) +aggressively. +@end enumerate + +That third reason is the one that turns people into unit testing +addicts. There is nothing so satisfying as doing a wholesale +replacement of an implementation, and having the unit tests reassure +you at each step of that change that all is well. 
It is like the +difference between exploring the wilderness with and without a good +map and compass: without the proper gear, you are more likely to +proceed cautiously and stick to the marked trails; with it, you can +take the most direct path to where you want to go. + +Look at the Check homepage for the latest information on Check: +@uref{http://check.sourceforge.net}. + +The Check project page is at: +@uref{http://sourceforge.net/projects/check/}. + +@node Unit Testing in C, Tutorial, Introduction, Top +@chapter Unit Testing in C +@ C unit testing + +The approach to unit testing frameworks used for Check originated with +Smalltalk, which is a late binding object-oriented language supporting +reflection. Writing a framework for C requires solving some special +problems that frameworks for Smalltalk, Java or Python don't have to +face. In all of those language, the worst that a unit test can do is +fail miserably, throwing an exception of some sort. In C, a unit test +is just as likely to trash its address space as it is to fail to meet +its test requirements, and if the test framework sits in the same +address space, goodbye test framework. + +To solve this problem, Check uses the @code{fork()} system call to +create a new address space in which to run each unit test, and then +uses message queues to send information on the testing process back to +the test framework. That way, your unit test can do all sorts of +nasty things with pointers, and throw a segmentation fault, and the +test framework will happily note a unit test error, and chug along. + +The Check framework is also designed to play happily with common +development environments for C programming. 
The author designed Check +around Autoconf/Automake (thus the name Check: @command{make check} is +the idiom used for testing with Autoconf/Automake), and the test +failure messages thrown up by Check use the common idiom of +@samp{filename:linenumber:message} used by @command{gcc} and family to +report problems in source code. With (X)Emacs, the output of Check +allows one to quickly navigate to the location of the unit test that +failed; presumably that also works in VI and IDEs. + +@menu +* Other Frameworks for C:: +@end menu + +@node Other Frameworks for C, , Unit Testing in C, Unit Testing in C +@section Other Frameworks for C +@cindex other frameworks +@cindex frameworks + +The authors know of the following additional unit testing frameworks +for C: + +@table @asis + +@item AceUnit +AceUnit (Advanced C and Embedded Unit) bills itself as a comfortable C +code unit test framework. It tries to mimick JUnit 4.x and includes +reflection-like capabilities. AceUnit can be used in resource +constraint environments, e.g. embedded software development, and +importantly it runs fine in environments where you cannot include a +single standard header file and cannot invoke a single standard C +function from the ANSI / ISO C libraries. It also has a Windows port. +It does not use forks to trap signals, although the authors have +expressed interest in adding such a feature. See the +@uref{http://aceunit.sourceforge.net/, AceUnit homepage}. + +@item GNU Autounit +Much along the same lines as Check, including forking to run unit +tests in a separate address space (in fact, the original author of +Check borrowed the idea from @acronym{GNU} Autounit). @acronym{GNU} +Autounit uses GLib extensively, which means that linking and such need +special options, but this may not be a big problem to you, especially +if you are already using GTK or GLib. See the +@uref{http://www.recursism.com/s2004/zp/products/gnu+autounit, GNU +Autounit homepage}. 
+ +@item cUnit +Also uses GLib, but does not fork to protect the address space of unit +tests. See the +@uref{http://web.archive.org/web/*/http://people.codefactory.se/~spotty/cunit/, +archived cUnit homepage}. + +@item CUnit +Standard C, with plans for a Win32 GUI implementation. Does not +currently fork or otherwise protect the address space of unit tests. +In early development. See the @uref{http://cunit.sourceforge.net, +CUnit homepage}. + +@item CppUnit +The premier unit testing framework for C++; you can also use it to test C +code. It is stable, actively developed, and has a GUI interface. The +primary reasons not to use CppUnit for C are first that it is quite +big, and second you have to write your tests in C++, which means you +need a C++ compiler. If these don't sound like concerns, it is +definitely worth considering, along with other C++ unit testing +frameworks. See the +@uref{http://cppunit.sourceforge.net/cppunit-wiki, CppUnit homepage}. + +@item embUnit +embUnit (Embedded Unit) is another unit test framework for embedded +systems. This one appears to be superseded by AceUnit. +@uref{https://sourceforge.net/projects/embunit/, Embedded Unit +homepage}. + +@item MinUnit +A minimal set of macros and that's it! The point is to +show how easy it is to unit test your code. See the +@uref{http://www.jera.com/techinfo/jtns/jtn002.html, MinUnit +homepage}. + +@item CUnit for Mr. Ando +A CUnit implementation that is fairly new, and apparently still in +early development. See the +@uref{http://park.ruru.ne.jp/ando/work/CUnitForAndo/html/, CUnit for +Mr. Ando homepage}. +@end table + +This list was last updated in March 2008. If you know of other C unit +test frameworks, please send an email plus description to +@email{check-devel AT lists.sourceforge.net} and we will add the entry +to this list. 
+ +It is the authors' considered opinion that forking or otherwise +trapping and reporting signals is indispensable for unit testing (but +it probably wouldn't be hard to add that to frameworks without that +feature). Try 'em all out: adapt this tutorial to use all of the +frameworks above, and use whichever you like. Contribute, spread the +word, and make one a standard. Languages such as Java and Python are +fortunate to have standard unit testing frameworks; it would be desirable +that C have one as well. + +@node Tutorial, Advanced Features, Unit Testing in C, Top +@chapter Tutorial: Basic Unit Testing + +This tutorial will use the JUnit +@uref{http://junit.sourceforge.net/doc/testinfected/testing.htm, Test +Infected} article as a starting point. We will be creating a library +to represent money, @code{libmoney}, that allows conversions between +different currency types. The development style will be ``test a +little, code a little'', with unit test writing preceding coding. +This constantly gives us insights into module usage, and also makes +sure we are constantly thinking about how to test our code. + +@menu +* How to Write a Test:: +* Setting Up the Money Build:: +* Test a Little:: +* Creating a Suite:: +* SRunner Output:: +@end menu + +@node How to Write a Test, Setting Up the Money Build, Tutorial, Tutorial +@section How to Write a Test + +Test writing using Check is very simple. The file in which the checks +are defined must include @file{check.h} as so: +@example +@verbatim +#include <check.h> +@end verbatim +@end example + +The basic unit test looks as follows: +@example +@verbatim +START_TEST (test_name) +{ + /* unit test code */ +} +END_TEST +@end verbatim +@end example + +The @code{START_TEST}/@code{END_TEST} pair are macros that setup basic +structures to permit testing. It is a mistake to leave off the +@code{END_TEST} marker; doing so produces all sorts of strange errors +when the check is compiled. 
+ +@node Setting Up the Money Build, Test a Little, How to Write a Test, Tutorial +@section Setting Up the Money Build + +Since we are creating a library to handle money, we will first create +an interface in @file{money.h}, an implementation in @file{money.c}, +and a place to store our unit tests, @file{check_money.c}. We want to +integrate these core files into our build system, and will need some +additional structure. To manage everything we'll use Autoconf, +Automake, and friends (collectively known as Autotools) for this +example. One could do something similar with ordinary Makefiles, but +in the authors' opinion, it is generally easier to use Autotools than +bare Makefiles, and they provide built-in support for running tests. + +Note that this is not the place to explain how Autotools works. If +you need help understanding what's going on beyond the explanations +here, the best place to start is probably Alexandre Duret-Lutz's +excellent +@uref{http://www.lrde.epita.fr/~adl/autotools.html, +Autotools tutorial}. + +The examples in this section are part of the Check distribution; you +don't need to spend time cutting and pasting or (worse) retyping them. +Locate the Check documentation on your system and look in the +@samp{example} directory. The standard directory for GNU/Linux +distributions should be @samp{/usr/share/doc/check/example}. This +directory contains the final version reached the end of the tutorial. If +you want to follow along, create backups of @file{money.h}, +@file{money.c}, and @file{check_money.c}, and then delete the originals. + +We set up a directory structure as follows: +@example +@verbatim +. +|-- Makefile.am +|-- README +|-- configure.ac +|-- src +| |-- Makefile.am +| |-- main.c +| |-- money.c +| `-- money.h +`-- tests + |-- Makefile.am + `-- check_money.c +@end verbatim +@end example + +Note that this is the output of @command{tree}, a great directory +visualization tool. 
The top-level @file{Makefile.am} is simple; it +merely tells Automake how to process subdirectories: +@example +@verbatim +SUBDIRS = src . tests +@end verbatim +@end example + +Note that @code{tests} comes last, because the code should be testing +an already compiled library. @file{configure.ac} is standard Autoconf +boilerplate, as specified by the Autotools tutorial and as suggested +by @command{autoscan}. The @code{AM_PATH_CHECK()} is the only line +particular to Check @pxref{AM_PATH_CHECK}. + +@file{src/Makefile.am} builds @samp{libmoney} as a Libtool archive, +and links it to an application simply called @command{main}. The +application's behaviour is not important to this tutorial; what's +important is that none of the functions we want to unit test appear in +@file{main.c}; this probably means that the only function in +@file{main.c} should be @code{main()} itself. In order to test the +whole application, unit testing is not appropriate: you should use a +system testing tool like Autotest. If you really want to test +@code{main()} using Check, rename it to something like +@code{_myproject_main()} and write a wrapper around it. + +The primary build instructions for our unit tests are in +@file{tests/Makefile.am}: + +@example +@verbatiminclude example/tests/Makefile.am +@end example + +@code{TESTS} tells Automake which test programs to run for +@command{make check}. Similarly, the @code{check_} prefix in +@code{check_PROGRAMS} actually comes from Automake; it says to build +these programs only when @command{make check} is run. (Recall that +Automake's @code{check} target is the origin of Check's name.) The +@command{check_money} test is a program that we will build from +@file{tests/check_money.c}, linking it against both +@file{src/libmoney.la} and the installed @file{libcheck.la} on our +system. 
The appropriate compiler and linker flags for using Check are +found in @code{@@CHECK_CFLAGS@@} and @code{@@CHECK_LIBS@@}, values +defined by the @code{AM_PATH_CHECK} macro. + +Now that all this infrastructure is out of the way, we can get on with +development. @file{src/money.h} should only contain standard C header +boilerplate: + +@example +@verbatiminclude example/src/money.1.h +@end example + +@file{src/money.c} should be empty, and @file{tests/check_money.c} +should only contain an empty @code{main()} function: + +@example +@verbatiminclude example/tests/check_money.1.c +@end example + +Create the GNU Build System for the project and then build @file{main} +and @file{libmoney.la} as follows: +@example +@verbatim +$ autoreconf --install +$ ./configure +$ make +@end verbatim +@end example + +(@command{autoreconf} determines which commands are needed in order +for @command{configure} to be created or brought up to date. +Previously one would use a script called @command{autogen.sh} or +@command{bootstrap}, but that practice is unnecessary now.) + +Now build and run the @command{check_money} test with @command{make +check}. If all goes well, @command{make} should report that our tests +passed. No surprise, because there aren't any tests to fail. If you +have problems, make sure to see @ref{AM_PATH_CHECK}. + +This was tested on the i386 ``testing'' distribution of Debian +GNU/Linux (etch) in March 2006, using Autoconf 2.59, Automake 1.9.6, +and Libtool 1.5.22. Please report any problems to +@email{check-devel AT lists.sourceforge.net}. + +@node Test a Little, Creating a Suite, Setting Up the Money Build, Tutorial +@section Test a Little, Code a Little + +The @uref{http://junit.sourceforge.net/doc/testinfected/testing.htm, +Test Infected} article starts out with a @code{Money} class, and so +will we. Of course, we can't do classes with C, but we don't really +need to. 
The Test Infected approach to writing code says that we +should write the unit test @emph{before} we write the code, and in +this case, we will be even more dogmatic and doctrinaire than the +authors of Test Infected (who clearly don't really get this stuff, +only being some of the originators of the Patterns approach to +software development and OO design). + +Here are the changes to @file{check_money.c} for our first unit test: + +@example +@verbatiminclude check_money.1-2.c.diff +@end example + +@findex fail_unless() +A unit test should just chug along and complete. If it exits early, +or is signaled, it will fail with a generic error message. (Note: it +is conceivable that you expect an early exit, or a signal and there is +functionality in Check to specifically assert that we should expect a +signal or an early exit.) If we want to get some information +about what failed, we need to use the @code{fail_unless()} function. The +function (actually a macro) takes a first Boolean argument, and an error +message to send if the condition is not true. + +@findex fail() +If the Boolean argument is too complicated to elegantly express within +@code{fail_unless()}, there is an alternate function @code{fail()} +that unconditionally fails. The second test inside +@code{test_money_create} above could be rewritten as follows: +@example +@verbatim +if (strcmp (money_currency (m), "USD") != 0) + { + fail ("Currency not set correctly on creation"); + } +@end verbatim +@end example + +@findex fail_if() +There is also a @code{fail_if()} function, which is the +inverse of @code{fail_unless()}. Using it, the above test then +looks like this: +@example +@verbatim +fail_if (strcmp (money_currency (m), "USD") != 0, + "Currency not set correctly on creation"); +@end verbatim +@end example + +For your convenience all fail functions also accepts NULL as the msg +argument and substitutes a suitable message for you. 
So you could also +write a test as follows: +@example +@verbatim +fail_unless (money_amount (m) == 5, NULL); +@end verbatim +@end example + +This is equivalent to: +@example +@verbatim +fail_unless (money_amount (m) == 5, + "Assertion 'money_amount (m) == 5' failed"); +@end verbatim +@end example + +All fail functions also support @code{varargs} and accept +@code{printf}-style format strings and arguments. This is especially +useful while debugging. With @code{printf}-style formatting the +message could look like this: +@example +@verbatim +fail_unless(money_amount (m) == 5, + "Amount was %d, instead of 5", money_amount (m)); +@end verbatim +@end example + +When we try to compile and run the test suite now using @command{make +check}, we get a whole host of compilation errors. It may seem a bit +strange to deliberately write code that won't compile, but notice what +we are doing: in creating the unit test, we are also defining +requirements for the money interface. Compilation errors are, in a +way, unit test failures of their own, telling us that the +implementation does not match the specification. If all we do is edit +the sources so that the unit test compiles, we are actually making +progress, guided by the unit tests, so that's what we will now do. + +We will patch our header @file{money.h} as follows: + +@example +@verbatiminclude money.1-2.h.diff +@end example + +Our code compiles now, and again passes all of the tests. However, +once we try to @emph{use} the functions in @code{libmoney} in the +@code{main()} of @code{check_money}, we'll run into more problems, as +they haven't actually been implemented yet. + +@node Creating a Suite, SRunner Output, Test a Little, Tutorial +@section Creating a Suite + +To run unit tests with Check, we must create some test cases, +aggregate them into a suite, and run them with a suite runner. That's +a bit of overhead, but it is mostly one-off. Here's a diff for the +new version of @file{check_money.c}. 
Note that we include stdlib.h to +get the definitions of @code{EXIT_SUCCESS} and @code{EXIT_FAILURE}. + +@example +@verbatiminclude check_money.2-3.c.diff +@end example + +Most of the @code{money_suite()} code should be self-explanatory. We are +creating a suite, creating a test case, adding the test case to the +suite, and adding the unit test we created above to the test case. +Why separate this off into a separate function, rather than inline it +in @code{main()}? Because any new tests will get added in +@code{money_suite()}, but nothing will need to change in @code{main()} +for the rest of this example, so main will stay relatively clean and +simple. + +Unit tests are internally defined as static functions. This means +that the code to add unit tests to test cases must be in the same +compilation unit as the unit tests themselves. This provides another +reason to put the creation of the test suite in a separate function: +you may later want to keep one source file per suite; defining a +uniquely named suite creation function allows you later to define a +header file giving prototypes for all the suite creation functions, +and encapsulate the details of where and how unit tests are defined +behind those functions. See the test program defined for Check itself +for an example of this strategy. + +The code in @code{main()} bears some explanation. We are creating a +suite runner object of type @code{SRunner} from the @code{Suite} we +created in @code{money_suite()}. We then run the suite, using the +@code{CK_NORMAL} flag to specify that we should print a summary of the +run, and list any failures that may have occurred. We capture the +number of failures that occurred during the run, and use that to +decide how to return. The @code{check} target created by Automake +uses the return value to decide whether the tests passed or failed. + +Now that the tests are actually being run by @command{check_money}, we +encounter linker errors again we try out @code{make check}. 
Try it +for yourself and see. The reason is that the @file{money.c} +implementation of the @file{money.h} interface hasn't been created +yet. Let's go with the fastest solution possible and implement stubs +for each of the functions in @code{money.c}: + +@example +@verbatiminclude money.1-3.c.diff +@end example + +Note that we @code{#include <stdlib.h>} to get the definition of +@code{NULL}. Now, the code compiles and links when we run @code{make +check}, but our unit test fails. Still, this is progress, and we can +focus on making the test pass. + +@node SRunner Output, , Creating a Suite, Tutorial +@section SRunner Output + +@findex srunner_run_all() +The function to run tests in an @code{SRunner} is defined as follows: +@example +@verbatim +void srunner_run_all (SRunner * sr, enum print_output print_mode); +@end verbatim +@end example + +This function does two things: + +@enumerate +@item +It runs all of the unit tests for all of the test cases defined for all +of the suites in the SRunner, and collects the results in the SRunner + +@item +It prints the results according to the @code{print_mode} specified. +@end enumerate + +For SRunners that have already been run, there is also a separate +printing function defined as follows: +@example +@verbatim +void srunner_print (SRunner *sr, enum print_output print_mode); +@end verbatim +@end example + +The enumeration values of @code{print_output} defined in Check that +parameter @code{print_mode} can assume are as follows: + +@table @code +@vindex CK_SILENT +@item CK_SILENT +Specifies that no output is to be generated. If you use this flag, you +either need to programmatically examine the SRunner object, print +separately, or use test logging (@pxref{Test Logging}.) + +@vindex CK_MINIMAL +@item CK_MINIMAL +Only a summary of the test run will be printed (number run, passed, +failed, errors). + +@vindex CK_NORMAL +@item CK_NORMAL +Prints the summary of the run, and prints one message per failed +test. 
+ +@vindex CK_VERBOSE +@item CK_VERBOSE +Prints the summary, and one message per test (passed or failed) + +@vindex CK_ENV +@vindex CK_VERBOSITY +@item CK_ENV +Gets the print mode from the environment variable @code{CK_VERBOSITY}, +which can have the values "silent", "minimal", "normal", "verbose". If +the variable is not found or the value is not recognized, the print +mode is set to @code{CK_NORMAL}. + +@vindex CK_SUBUNIT +@item CK_SUBUNIT +Prints running progress through the @uref{https://launchpad.net/subunit/, +subunit} test runner protocol. See 'subunit support' under the Advanced Features section for more information. +@end table + +With the @code{CK_NORMAL} flag specified in our @code{main()}, let's +rerun make check now. As before, we get the following satisfying +output: +@example +@verbatim +Running suite(s): Money +0%: Checks: 1, Failures: 1, Errors: 0 +check_money.c:10:F:Core:test_money_create: Amount not set correctly on +creation +FAIL: check_money +================================================== +1 of 1 tests failed +Please report to check-devel@lists.sourceforge.net +================================================== +@end verbatim +@end example + +The first number in the summary line tells us that 0% of our tests +passed, and the rest of the line tells us that there was one check in +total, and of those checks, one failure and zero errors. The next +line tells us exactly where that failure occurred, and what kind of +failure it was (P for pass, F for failure, E for error). + +After that we have some higher level output generated by Automake: the +@code{check_money} program failed, and the bug-report address given in +@file{configure.ac} is printed. + +Let's implement the @code{money_amount} function, so that it will pass +its tests. 
We first have to create a Money structure to hold the +amount, and then implement the function to return the correct amount: + +@example +@verbatiminclude money.3-4.c.diff +@end example + +We will now rerun make check and@dots{} what's this? The output is +now as follows: +@example +@verbatim +Running suite(s): Money +0%: Checks: 1, Failures: 0, Errors: 1 +check_money.c:5:E:Core:test_money_create: (after this point) Received +signal 11 (Segmentation fault) +@end verbatim +@end example + +@findex mark_point() +What does this mean? Note that we now have an error, rather than a +failure. This means that our unit test either exited early, or was +signaled. Next note that the failure message says ``after this +point''; This means that somewhere after the point noted +(@file{check_money.c}, line 5) there was a problem: signal 11 (a.k.a. +segmentation fault). The last point reached is set on entry to the +unit test, and after every call to @code{fail_unless()}, +@code{fail()}, or the special function @code{mark_point()}. For +example, if we wrote some test code as follows: +@example +@verbatim +stuff_that_works (); +mark_point (); +stuff_that_dies (); +@end verbatim +@end example + +then the point returned will be that marked by @code{mark_point()}. + +The reason our test failed so horribly is that we haven't implemented +@code{money_create()} to create any @code{Money}. We'll go ahead and +implement that, the symmetric @code{money_free()}, and +@code{money_currency()} too, in order to make our unit test pass again: + +@example +@verbatiminclude money.4-5.c.diff +@end example + +@node Advanced Features, Conclusion and References, Tutorial, Top +@chapter Advanced Features + +What you've seen so far is all you need for basic unit testing. The +features described in this section are additions to Check that make it +easier for the developer to write, run, and analyse tests. 
+ +@menu +* Running Multiple Cases:: +* No Fork Mode:: +* Test Fixtures:: +* Multiple Suites in one SRunner:: +* Testing Signal Handling and Exit Values:: +* Looping Tests:: +* Test Timeouts:: +* Determining Test Coverage:: +* Test Logging:: +* Subunit Support:: +@end menu + +@node Running Multiple Cases, No Fork Mode, Advanced Features, Advanced Features +@section Running Multiple Cases + +What happens if we pass @code{-1} as the @code{amount} in +@code{money_create()}? What should happen? Let's write a unit test. +Since we are now testing limits, we should also test what happens when +we create @code{Money} where @code{amount == 0}. Let's put these in a +separate test case called ``Limits'' so that @code{money_suite} is +changed like so: + +@example +@verbatiminclude check_money.3-6.c.diff +@end example + +Now we can rerun our suite, and fix the problem(s). Note that errors +in the ``Core'' test case will be reported as ``Core'', and errors in +the ``Limits'' test case will be reported as ``Limits'', giving you +additional information about where things broke. + +@example +@verbatiminclude money.5-6.c.diff +@end example + +@node No Fork Mode, Test Fixtures, Running Multiple Cases, Advanced Features +@section No Fork Mode + +Check normally forks to create a separate address space. This allows +a signal or early exit to be caught and reported, rather than taking +down the entire test program, and is normally very useful. However, +when you are trying to debug why the segmentation fault or other +program error occurred, forking makes it difficult to use debugging +tools. To define fork mode for an @code{SRunner} object, you can do +one of the following: + +@vindex CK_FORK +@findex srunner_set_fork_status() +@enumerate +@item +Define the CK_FORK environment variable to equal ``no''. 
+ +@item +Explicitly define the fork status through the use of the following +function: + +@verbatim +void srunner_set_fork_status (SRunner * sr, enum fork_status fstat); +@end verbatim +@end enumerate + +The enum @code{fork_status} allows the @code{fstat} parameter to +assume the following values: @code{CK_FORK} and @code{CK_NOFORK}. An +explicit call to @code{srunner_set_fork_status()} overrides the +@code{CK_FORK} environment variable. + +@node Test Fixtures, Multiple Suites in one SRunner, No Fork Mode, Advanced Features +@section Test Fixtures + +We may want multiple tests that all use the same Money. In such +cases, rather than setting up and tearing down objects for each unit +test, it may be convenient to add some setup that is constant across +all the tests in a test case. Each such setup/teardown pair is called +a @dfn{test fixture} in test-driven development jargon. + +A fixture is created by defining a setup and/or a teardown function, +and associating it with a test case. There are two kinds of test +fixtures in Check: checked and unchecked fixtures. These are defined +as follows: + +@table @asis +@item Checked fixtures +are run inside the address space created by the fork to create the +unit test. Before each unit test in a test case, the @code{setup()} +function is run, if defined. After each unit test, the +@code{teardown()} function is run, if defined. Since they run inside +the forked address space, if checked fixtures signal or otherwise +fail, they will be caught and reported by the @code{SRunner}. A +checked @code{teardown()} fixture will run even if the unit test +fails. + +@item Unchecked fixtures +are run in the same address space as the test program. Therefore they +may not signal or exit, but may use the fail functions. The unchecked +@code{setup()}, if defined, is run before the test case is +started. The unchecked @code{teardown()}, if defined, is run after the +test case is done. 
+@end table + +So for a test case that contains @code{check_one()} and +@code{check_two()} unit tests, +@code{checked_setup()}/@code{checked_teardown()} checked fixtures, and +@code{unchecked_setup()}/@code{unchecked_teardown()} unchecked +fixtures, the control flow would be: +@example +@verbatim +unchecked_setup(); +fork(); +checked_setup(); +check_one(); +checked_teardown(); +wait(); +fork(); +checked_setup(); +check_two(); +checked_teardown(); +wait(); +unchecked_teardown(); +@end verbatim +@end example + +@menu +* Test Fixture Examples:: +* Checked vs Unchecked Fixtures:: +@end menu + +@node Test Fixture Examples, Checked vs Unchecked Fixtures, Test Fixtures, Test Fixtures +@subsection Test Fixture Examples + +We create a test fixture in Check as follows: + +@enumerate +@item +Define global variables, and functions to setup and teardown the +globals. The functions both take @code{void} and return @code{void}. +In our example, we'll make @code{five_dollars} be a global created and +freed by @code{setup()} and @code{teardown()} respectively. + +@item +@findex tcase_add_checked_fixture() +Add the @code{setup()} and @code{teardown()} functions to the test +case with @code{tcase_add_checked_fixture()}. In our example, this +belongs in the suite setup function @code{money_suite}. + +@item +Rewrite tests to use the globals. We'll rewrite our first to use +@code{five_dollars}. +@end enumerate + +Note that the functions used for setup and teardown do not need to be +named @code{setup()} and @code{teardown()}, but they must take +@code{void} and return @code{void}. We'll update @file{check_money.c} +as follows: + +@example +@verbatiminclude check_money.6-7.c.diff +@end example + +@node Checked vs Unchecked Fixtures, , Test Fixture Examples, Test Fixtures +@subsection Checked vs Unchecked Fixtures + +Checked fixtures run once for each unit test in a test case, and so +they should not be used for expensive setup. 
However, if a checked +fixture fails and @code{CK_FORK} mode is being used, it will not bring +down the entire framework. + +On the other hand, unchecked fixtures run once for an entire test +case, as opposed to once per unit test, and so can be used for +expensive setup. However, since they may take down the entire test +program, they should only be used if they are known to be safe. + +Additionally, the isolation of objects created by unchecked fixtures +is not guaranteed by @code{CK_NOFORK} mode. Normally, in +@code{CK_FORK} mode, unit tests may abuse the objects created in an +unchecked fixture with impunity, without affecting other unit tests in +the same test case, because the fork creates a separate address space. +However, in @code{CK_NOFORK} mode, all tests live in the same address +space, and side effects in one test will affect the unchecked fixture +for the other tests. + +A checked fixture will generally not be affected by unit test side +effects, since the @code{setup()} is run before each unit test. There +is an exception for side effects to the total environment in which the +test program lives: for example, if the @code{setup()} function +initializes a file that a unit test then changes, the combination of +the @code{teardown()} function and @code{setup()} function must be able +to restore the environment for the next unit test. + +If the @code{setup()} function in a fixture fails, in either checked +or unchecked fixtures, the unit tests for the test case, and the +@code{teardown()} function for the fixture will not be run. A fixture +error will be created and reported to the @code{SRunner}. + +@node Multiple Suites in one SRunner, Testing Signal Handling and Exit Values, Test Fixtures, Advanced Features +@section Multiple Suites in one SRunner + +In a large program, it will be convenient to create multiple suites, +each testing a module of the program. 
While one can create several +test programs, each running one @code{Suite}, it may be convenient to +create one main test program, and use it to run multiple suites. The +Check test suite provides an example of how to do this. The main +testing program is called @code{check_check}, and has a header file +that declares suite creation functions for all the module tests: +@example +@verbatim +Suite *make_sub_suite (void); +Suite *make_sub2_suite (void); +Suite *make_master_suite (void); +Suite *make_list_suite (void); +Suite *make_msg_suite (void); +Suite *make_log_suite (void); +Suite *make_limit_suite (void); +Suite *make_fork_suite (void); +Suite *make_fixture_suite (void); +Suite *make_pack_suite (void); +@end verbatim +@end example + +@findex srunner_add_suite() +The function @code{srunner_add_suite()} is used to add additional +suites to an @code{SRunner}. Here is the code that sets up and runs +the @code{SRunner} in the @code{main()} function in +@file{check_check_main.c}: +@example +@verbatim +SRunner *sr; +sr = srunner_create (make_master_suite ()); +srunner_add_suite (sr, make_list_suite ()); +srunner_add_suite (sr, make_msg_suite ()); +srunner_add_suite (sr, make_log_suite ()); +srunner_add_suite (sr, make_limit_suite ()); +srunner_add_suite (sr, make_fork_suite ()); +srunner_add_suite (sr, make_fixture_suite ()); +srunner_add_suite (sr, make_pack_suite ()); +@end verbatim +@end example + +@node Testing Signal Handling and Exit Values, Looping Tests, Multiple Suites in one SRunner, Advanced Features +@section Testing Signal Handling and Exit Values + +@findex tcase_add_test_raise_signal() + +To enable testing of signal handling, there is a function +@code{tcase_add_test_raise_signal()} which is used instead of +@code{tcase_add_test()}. This function takes an additional signal +argument, specifying a signal that the test expects to receive. If no +signal is received this is logged as a failure. If a different signal +is received this is logged as an error. 
+ +The signal handling functionality only works in CK_FORK mode. + +@findex tcase_add_exit_test() + +To enable testing of expected exits, there is a function +@code{tcase_add_exit_test()} which is used instead of @code{tcase_add_test()}. +This function takes an additional expected exit value argument, +specifying a value that the test is expected to exit with. If the test +exits with any other value this is logged as a failure. If the test exits +early this is logged as an error. + +The exit handling functionality only works in CK_FORK mode. + +@node Looping Tests, Test Timeouts, Testing Signal Handling and Exit Values, Advanced Features +@section Looping Tests + +Looping tests are tests that are called with a new context for each +loop iteration. This makes them ideal for table based tests. If +loops are used inside ordinary tests to test multiple values, only the +first error will be shown before the test exits. However, looping +tests allow for all errors to be shown at once, which can help out +with debugging. + +@findex tcase_add_loop_test() +Adding a normal test with @code{tcase_add_loop_test()} instead of +@code{tcase_add_test()} will make the test function the body of a +@code{for} loop, with the addition of a fork before each call. The +loop variable @code{_i} is available for use inside the test function; +for example, it could serve as an index into a table. For failures, +the iteration which caused the failure is available in error messages +and logs. + +Start and end values for the loop are supplied when adding the test. +The values are used as in a normal @code{for} loop. 
Below is some +pseudo-code to show the concept: +@example +@verbatim +for (_i = tfun->loop_start; _i < tfun->loop_end; _i++) +{ + fork(); /* New context */ + tfun->f(_i); /* Call test function */ + wait(); /* Wait for child to terminate */ +} +@end verbatim +@end example + +An example of looping test usage follows: +@example +@verbatim +static const int primes[5] = {2,3,5,7,11}; + +START_TEST (check_is_prime) +{ + fail_unless (is_prime (primes[_i])); +} +END_TEST + +... + +tcase_add_loop_test (tcase, check_is_prime, 0, 5); +@end verbatim +@end example + +Looping tests work in @code{CK_NOFORK} mode as well, but without the +forking. This means that only the first error will be shown. + +@node Test Timeouts, Determining Test Coverage, Looping Tests, Advanced Features +@section Test Timeouts + +@findex tcase_set_timeout() +@vindex CK_DEFAULT_TIMEOUT +@vindex CK_TIMEOUT_MULTIPLIER +To be certain that a test won't hang indefinitely, all tests are run +with a timeout, the default being 4 seconds. If the test is not +finished within that time, it is killed and logged as an error. + +The timeout for a specific test case, which may contain multiple unit +tests, can be changed with the @code{tcase_set_timeout()} function. +The default timeout used for all test cases can be changed with the +environment variable @code{CK_DEFAULT_TIMEOUT}, but this will not +override an explicitly set timeout. Another way to change the timeout +length is to use the @code{CK_TIMEOUT_MULTIPLIER} environment variable, +which multiplies all timeouts, including those set with +@code{tcase_set_timeout()}, with the supplied integer value. All timeout +arguments are in seconds and a timeout of 0 seconds turns off the timeout +functionality. + +Test timeouts are only available in CK_FORK mode. 
+ +@node Determining Test Coverage, Test Logging, Test Timeouts, Advanced Features +@section Determining Test Coverage + +The term @dfn{code coverage} refers to the extent that the statements +of a program are executed during a run. Thus, @dfn{test coverage} +refers to code coverage when executing unit tests. This information +can help you to do two things: + +@itemize +@item +Write better tests that more fully exercise your code, thereby +improving confidence in it. + +@item +Detect dead code that could be factored away. +@end itemize + +Check itself does not provide any means to determine this test +coverage; rather, this is the job of the compiler and its related +tools. In the case of @command{gcc} this information is easy to +obtain, and other compilers should provide similar facilities. + +Using @command{gcc}, first enable test coverage profiling when +building your source by specifying the @option{-fprofile-arcs} and +@option{-ftest-coverage} switches: +@example +@verbatim +$ gcc -g -Wall -fprofile-arcs -ftest-coverage -o foo foo.c foo_check.c +@end verbatim +@end example + +You will see that an additional @file{.gcno} file is created for each +@file{.c} input file. After running your tests the normal way, a +@file{.gcda} file is created for each @file{.gcno} file. These +contain the coverage data in a raw format. To combine this +information and a source file into a more readable format you can use +the @command{gcov} utility: +@example +@verbatim +$ gcov foo.c +@end verbatim +@end example + +This will produce the file @file{foo.c.gcov} which looks like this: +@example +@verbatim + -: 41: * object */ + 18: 42: if (ht->table[p] != NULL) { + -: 43: /* replaces the current entry */ + #####: 44: ht->count--; + #####: 45: ht->size -= ht->table[p]->size + + #####: 46: sizeof(struct hashtable_entry); +@end verbatim +@end example + +As you can see this is an annotated source file with three columns: +usage information, line numbers, and the original source. 
The usage +information in the first column can either be '-', which means that +this line does not contain code that could be executed; '#####', which +means this line was never executed although it does contain +code---these are the lines that are probably most interesting for you; +or a number, which indicates how often that line was executed. + +This is of course only a very brief overview, but it should illustrate +how determining test coverage generally works, and how it can help +you. For more information or help with other compilers, please refer +to the relevant manuals. + +@node Test Logging, Subunit Support, Determining Test Coverage, Advanced Features +@section Test Logging + +@findex srunner_set_log() +Check supports an operation to log the results of a test run. To use +test logging, call the @code{srunner_set_log()} function with the name +of the log file you wish to create: +@example +@verbatim +SRunner *sr; +sr = srunner_create (make_s1_suite ()); +srunner_add_suite (sr, make_s2_suite ()); +srunner_set_log (sr, "test.log"); +srunner_run_all (sr, CK_NORMAL); +@end verbatim +@end example + +In this example, Check will write the results of the run to +@file{test.log}. The @code{print_mode} argument to +@code{srunner_run_all()} is ignored during test logging; the log will +contain a result entry, organized by suite, for every test run. 
Here +is an example of test log output: +@example +@verbatim +Running suite S1 +ex_log_output.c:8:P:Core:test_pass: Test passed +ex_log_output.c:14:F:Core:test_fail: Failure +ex_log_output.c:18:E:Core:test_exit: (after this point) Early exit +with return value 1 +Running suite S2 +ex_log_output.c:26:P:Core:test_pass2: Test passed +Results for all suites run: +50%: Checks: 4, Failures: 1, Errors: 1 +@end verbatim +@end example + +@menu +* XML Logging:: +@end menu + +@node XML Logging, , Test Logging, Test Logging +@subsection XML Logging + +@findex srunner_set_xml() +@findex srunner_has_xml() +@findex srunner_xml_fname() +The log can also be written in XML. The following functions define +the interface for XML logs: +@example +@verbatim +void srunner_set_xml (SRunner *sr, const char *fname); +int srunner_has_xml (SRunner *sr); +const char *srunner_xml_fname (SRunner *sr); +@end verbatim +@end example + +The only thing you need to do to get XML output is call +@code{srunner_set_xml()} before the tests are run. 
Here is an example +of the same log output as before but in XML: +@example +@verbatim +<?xml version="1.0"?> +<testsuites xmlns="http://check.sourceforge.net/ns"> + <datetime>2004-08-20 12:53:32</datetime> + <suite> + <title>S1</title> + <test result="success"> + <path>.</path> + <fn>ex_xml_output.c:8</fn> + <id>test_pass</id> + <description>Core</description> + <message>Passed</message> + </test> + <test result="failure"> + <path>.</path> + <fn>ex_xml_output.c:14</fn> + <id>test_fail</id> + <description>Core</description> + <message>Failure</message> + </test> + <test result="error"> + <path>.</path> + <fn>ex_xml_output.c:18</fn> + <id>test_exit</id> + <description>Core</description> + <message>Early exit with return value 1</message> + </test> + </suite> + <suite> + <title>S2</title> + <test result="success"> + <path>.</path> + <fn>ex_xml_output.c:26</fn> + <id>test_pass2</id> + <description>Core</description> + <message>Passed</message> + </test> + </suite> + <duration>0.304875</duration> +</testsuites> +@end verbatim +@end example + +@node Subunit Support, , Test Logging, Advanced Features +@section Subunit Support + +Check supports running test suites with subunit output. This can be useful to +combine test results from multiple languages, or to perform programmatic +analysis on the results of multiple check test suites or otherwise handle test +results in a programmatic manner. Using subunit with check is very +straightforward. There are two steps: +1) In your check test suite driver pass 'CK_SUBUNIT' as the output mode +for your srunner. +@example +@verbatim +SRunner *sr; +sr = srunner_create (make_s1_suite ()); +srunner_add_suite (sr, make_s2_suite ()); +srunner_run_all (sr, CK_SUBUNIT); +@end verbatim +@end example +2) Setup your main language test runner to run your check based test +executable. 
For instance using python: +@example +@verbatim + +import subunit + +class ShellTests(subunit.ExecTestCase): + """Run some tests from the C codebase.""" + + def test_group_one(self): + """./foo/check_driver""" + + def test_group_two(self): + """./foo/other_driver""" +@end verbatim +@end example + +In this example, running the test suite ShellTests in python (using any test +runner - unittest.py, tribunal, trial, nose or others) will run +./foo/check_driver and ./foo/other_driver and report on their result. + +Subunit is hosted on launchpad - the @uref{https://launchpad.net/subunit/, +subunit} project there contains bug tracker, future plans, and source code +control details. + +@node Conclusion and References, AM_PATH_CHECK, Advanced Features, Top +@chapter Conclusion and References +The tutorial and description of advanced features has provided an +introduction to all of the functionality available in Check. +Hopefully, this is enough to get you started writing unit tests with +Check. All the rest is simply application of what has been learned so +far with repeated application of the ``test a little, code a little'' +strategy. + +For further reference, see Kent Beck, ``Test-Driven Development: By +Example'', 1st ed., Addison-Wesley, 2003. ISBN 0-321-14653-0. + +If you know of other authoritative references to unit testing and +test-driven development, please send us a patch to this manual. + +@node AM_PATH_CHECK, Copying This Manual, Conclusion and References, Top +@chapter AM_PATH_CHECK +@findex AM_PATH_CHECK() + +The @code{AM_PATH_CHECK()} macro is defined in the file +@file{check.m4} which is installed by Check. 
It has some optional +parameters that you might find useful in your @file{configure.ac}: +@verbatim +AM_PATH_CHECK([MINIMUM-VERSION, + [ACTION-IF-FOUND[,ACTION-IF-NOT-FOUND]]]) +@end verbatim + +@code{AM_PATH_CHECK} does several things: + +@enumerate +@item +It ensures check.h is available + +@item +It ensures a compatible version of Check is installed + +@item +It sets @env{CHECK_CFLAGS} and @env{CHECK_LIBS} for use by Automake. +@end enumerate + +If you include @code{AM_PATH_CHECK()} in @file{configure.ac} and +subsequently see warnings when attempting to create +@command{configure}, it probably means one of the following things: + +@enumerate +@item +You forgot to call @command{aclocal}. @command{autoreconf} will do +this for you. + +@item +@command{aclocal} can't find @file{check.m4}. Here are some possible +solutions: + +@enumerate a +@item +Call @command{aclocal} with @option{-I} set to the location of +@file{check.m4}. This means you have to call both @command{aclocal} and +@command{autoreconf}. + +@item +Add the location of @file{check.m4} to the @samp{dirlist} used by +@command{aclocal} and then call @command{autoreconf}. This means you +need permission to modify the @samp{dirlist}. + +@item +Set @code{ACLOCAL_AMFLAGS} in your top-level @file{Makefile.am} to +include @option{-I DIR} with @code{DIR} being the location of +@file{check.m4}. Then call @command{autoreconf}. +@end enumerate +@end enumerate + +@node Copying This Manual, Index, AM_PATH_CHECK, Top +@appendix Copying This Manual + +@menu +* GNU Free Documentation License:: License for copying this manual. +@end menu + +@include fdl.texi + +@node Index, , Copying This Manual, Top +@unnumbered Index + +@printindex cp + +@bye diff --git a/doc/example/Makefile.am b/doc/example/Makefile.am new file mode 100644 index 0000000..8376833 --- /dev/null +++ b/doc/example/Makefile.am @@ -0,0 +1,3 @@ +## Process this file with automake to produce Makefile.in + +SUBDIRS = src . tests
\ No newline at end of file diff --git a/doc/example/README b/doc/example/README new file mode 100644 index 0000000..bc89980 --- /dev/null +++ b/doc/example/README @@ -0,0 +1,24 @@ +This is the "money example" from the Check tutorial. + +You need the following programs installed on your system: + -- Autoconf 2.59 + -- Automake 1.9.6 + -- Libtool 1.5.22 + -- Check 0.9.3 + +Somewhat earlier versions of these programs might work. + +Then, do as follows: + +$ autoreconf --install +$ ./configure +$ make +$ make check + +Don't do "make install" unless you want to install the money example. + +money.c and money.h are built as a library. src/main.c:main() is a +client of libmoney.la, just as tests/check_money.c:main() is a client +of libmoney.la + +Please send bug reports to check-devel AT lists.sourceforge.net. diff --git a/doc/example/configure.ac b/doc/example/configure.ac new file mode 100644 index 0000000..a25417b --- /dev/null +++ b/doc/example/configure.ac @@ -0,0 +1,46 @@ +# Process this file with autoconf to produce a configure script. + +# Prelude. +AC_PREREQ([2.59]) +AC_INIT([Money], [0.3], [check-devel AT lists.sourceforge.net]) + +# unique source file --- primitive safety check +AC_CONFIG_SRCDIR([src/money.c]) + +# place to put some extra build scripts installed +AC_CONFIG_AUX_DIR([build-aux]) + +# fairly severe build strictness +# change foreign to gnu or gnits to comply with gnu standards +AM_INIT_AUTOMAKE([-Wall -Werror foreign 1.9.6]) + +# Checks for programs. +AC_PROG_CC +AC_PROG_LIBTOOL + +# Checks for libraries. + +# This macro is defined in check.m4 and tests if check.h and +# libcheck.a are installed in your system. It sets CHECK_CFLAGS and +# CHECK_LIBS accordingly. +# AM_PATH_CHECK([MINIMUM-VERSION, +# [ACTION-IF-FOUND [, ACTION-IF-NOT-FOUND]]]) +AM_PATH_CHECK() + +# Checks for header files. +AC_HEADER_STDC +AC_CHECK_HEADERS([stdlib.h]) + +# Checks for typedefs, structures, and compiler characteristics. + +# Checks for library functions. 
+AC_FUNC_MALLOC + +# Output files +AC_CONFIG_HEADERS([config.h]) + +AC_CONFIG_FILES([Makefile + src/Makefile + tests/Makefile]) + +AC_OUTPUT diff --git a/doc/example/src/Makefile.am b/doc/example/src/Makefile.am new file mode 100644 index 0000000..0ab2add --- /dev/null +++ b/doc/example/src/Makefile.am @@ -0,0 +1,8 @@ +## Process this file with automake to produce Makefile.in + +lib_LTLIBRARIES = libmoney.la +libmoney_la_SOURCES = money.c money.h + +bin_PROGRAMS = main +main_SOURCES = main.c +main_LDADD = libmoney.la diff --git a/doc/example/src/main.c b/doc/example/src/main.c new file mode 100644 index 0000000..caeae4a --- /dev/null +++ b/doc/example/src/main.c @@ -0,0 +1,12 @@ +#include "money.h" + +/* only main should be in this file, to make all other functions in + the prograble testable by Check. in order to test main(), use a + whole program testing framework like Autotest. +*/ + +int +main (void) +{ + return 0; +} diff --git a/doc/example/src/money.1.c b/doc/example/src/money.1.c new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/doc/example/src/money.1.c diff --git a/doc/example/src/money.1.h b/doc/example/src/money.1.h new file mode 100644 index 0000000..d1b2094 --- /dev/null +++ b/doc/example/src/money.1.h @@ -0,0 +1,4 @@ +#ifndef MONEY_H +#define MONEY_H + +#endif /* MONEY_H */ diff --git a/doc/example/src/money.2.h b/doc/example/src/money.2.h new file mode 100644 index 0000000..1897415 --- /dev/null +++ b/doc/example/src/money.2.h @@ -0,0 +1,11 @@ +#ifndef MONEY_H +#define MONEY_H + +typedef struct Money Money; + +Money *money_create (int amount, char *currency); +int money_amount (Money * m); +char *money_currency (Money * m); +void money_free (Money * m); + +#endif /* MONEY_H */ diff --git a/doc/example/src/money.3.c b/doc/example/src/money.3.c new file mode 100644 index 0000000..52ac9ea --- /dev/null +++ b/doc/example/src/money.3.c @@ -0,0 +1,26 @@ +#include <stdlib.h> +#include "money.h" + +Money * +money_create (int amount, char 
*currency) +{ + return NULL; +} + +int +money_amount (Money * m) +{ + return 0; +} + +char * +money_currency (Money * m) +{ + return NULL; +} + +void +money_free (Money * m) +{ + return; +} diff --git a/doc/example/src/money.4.c b/doc/example/src/money.4.c new file mode 100644 index 0000000..e925672 --- /dev/null +++ b/doc/example/src/money.4.c @@ -0,0 +1,31 @@ +#include <stdlib.h> +#include "money.h" + +struct Money +{ + int amount; +}; + +Money * +money_create (int amount, char *currency) +{ + return NULL; +} + +int +money_amount (Money * m) +{ + return m->amount; +} + +char * +money_currency (Money * m) +{ + return NULL; +} + +void +money_free (Money * m) +{ + return; +} diff --git a/doc/example/src/money.5.c b/doc/example/src/money.5.c new file mode 100644 index 0000000..64267a9 --- /dev/null +++ b/doc/example/src/money.5.c @@ -0,0 +1,41 @@ +#include <stdlib.h> +#include "money.h" + +struct Money +{ + int amount; + char *currency; +}; + +Money * +money_create (int amount, char *currency) +{ + Money *m = malloc (sizeof (Money)); + if (m == NULL) + { + return NULL; + } + + m->amount = amount; + m->currency = currency; + return m; +} + +int +money_amount (Money * m) +{ + return m->amount; +} + +char * +money_currency (Money * m) +{ + return m->currency; +} + +void +money_free (Money * m) +{ + free (m); + return; +} diff --git a/doc/example/src/money.6.c b/doc/example/src/money.6.c new file mode 100644 index 0000000..47f09bb --- /dev/null +++ b/doc/example/src/money.6.c @@ -0,0 +1,46 @@ +#include <stdlib.h> +#include "money.h" + +struct Money +{ + int amount; + char *currency; +}; + +Money * +money_create (int amount, char *currency) +{ + if (amount < 0) + { + return NULL; + } + + Money *m = malloc (sizeof (Money)); + if (m == NULL) + { + return NULL; + } + + m->amount = amount; + m->currency = currency; + return m; +} + +int +money_amount (Money * m) +{ + return m->amount; +} + +char * +money_currency (Money * m) +{ + return m->currency; +} + +void +money_free 
(Money * m) +{ + free (m); + return; +} diff --git a/doc/example/src/money.c b/doc/example/src/money.c new file mode 100644 index 0000000..47f09bb --- /dev/null +++ b/doc/example/src/money.c @@ -0,0 +1,46 @@ +#include <stdlib.h> +#include "money.h" + +struct Money +{ + int amount; + char *currency; +}; + +Money * +money_create (int amount, char *currency) +{ + if (amount < 0) + { + return NULL; + } + + Money *m = malloc (sizeof (Money)); + if (m == NULL) + { + return NULL; + } + + m->amount = amount; + m->currency = currency; + return m; +} + +int +money_amount (Money * m) +{ + return m->amount; +} + +char * +money_currency (Money * m) +{ + return m->currency; +} + +void +money_free (Money * m) +{ + free (m); + return; +} diff --git a/doc/example/src/money.h b/doc/example/src/money.h new file mode 100644 index 0000000..1897415 --- /dev/null +++ b/doc/example/src/money.h @@ -0,0 +1,11 @@ +#ifndef MONEY_H +#define MONEY_H + +typedef struct Money Money; + +Money *money_create (int amount, char *currency); +int money_amount (Money * m); +char *money_currency (Money * m); +void money_free (Money * m); + +#endif /* MONEY_H */ diff --git a/doc/example/tests/Makefile.am b/doc/example/tests/Makefile.am new file mode 100644 index 0000000..729a610 --- /dev/null +++ b/doc/example/tests/Makefile.am @@ -0,0 +1,7 @@ +## Process this file with automake to produce Makefile.in + +TESTS = check_money +check_PROGRAMS = check_money +check_money_SOURCES = check_money.c $(top_builddir)/src/money.h +check_money_CFLAGS = @CHECK_CFLAGS@ +check_money_LDADD = $(top_builddir)/src/libmoney.la @CHECK_LIBS@ diff --git a/doc/example/tests/check_money.1.c b/doc/example/tests/check_money.1.c new file mode 100644 index 0000000..398ec67 --- /dev/null +++ b/doc/example/tests/check_money.1.c @@ -0,0 +1,5 @@ +int +main (void) +{ + return 0; +} diff --git a/doc/example/tests/check_money.2.c b/doc/example/tests/check_money.2.c new file mode 100644 index 0000000..7c7c9d4 --- /dev/null +++ 
b/doc/example/tests/check_money.2.c @@ -0,0 +1,20 @@ +#include <check.h> +#include "../src/money.h" + +START_TEST (test_money_create) +{ + Money *m; + m = money_create (5, "USD"); + fail_unless (money_amount (m) == 5, + "Amount not set correctly on creation"); + fail_unless (strcmp (money_currency (m), "USD") == 0, + "Currency not set correctly on creation"); + money_free (m); +} +END_TEST + +int +main (void) +{ + return 0; +} diff --git a/doc/example/tests/check_money.3.c b/doc/example/tests/check_money.3.c new file mode 100644 index 0000000..a19cf8b --- /dev/null +++ b/doc/example/tests/check_money.3.c @@ -0,0 +1,40 @@ +#include <stdlib.h> +#include <check.h> +#include "../src/money.h" + +START_TEST (test_money_create) +{ + Money *m; + m = money_create (5, "USD"); + fail_unless (money_amount (m) == 5, + "Amount not set correctly on creation"); + fail_unless (strcmp (money_currency (m), "USD") == 0, + "Currency not set correctly on creation"); + money_free (m); +} +END_TEST + +Suite * +money_suite (void) +{ + Suite *s = suite_create ("Money"); + + /* Core test case */ + TCase *tc_core = tcase_create ("Core"); + tcase_add_test (tc_core, test_money_create); + suite_add_tcase (s, tc_core); + + return s; +} + +int +main (void) +{ + int number_failed; + Suite *s = money_suite (); + SRunner *sr = srunner_create (s); + srunner_run_all (sr, CK_NORMAL); + number_failed = srunner_ntests_failed (sr); + srunner_free (sr); + return (number_failed == 0) ? 
EXIT_SUCCESS : EXIT_FAILURE; +} diff --git a/doc/example/tests/check_money.6.c b/doc/example/tests/check_money.6.c new file mode 100644 index 0000000..f47fc77 --- /dev/null +++ b/doc/example/tests/check_money.6.c @@ -0,0 +1,63 @@ +#include <stdlib.h> +#include <check.h> +#include "../src/money.h" + +START_TEST (test_money_create) +{ + Money *m; + m = money_create (5, "USD"); + fail_unless (money_amount (m) == 5, + "Amount not set correctly on creation"); + fail_unless (strcmp (money_currency (m), "USD") == 0, + "Currency not set correctly on creation"); + money_free (m); +} +END_TEST + +START_TEST (test_money_create_neg) +{ + Money *m = money_create (-1, "USD"); + fail_unless (m == NULL, + "NULL should be returned on attempt to create with " + "a negative amount"); +} +END_TEST + +START_TEST (test_money_create_zero) +{ + Money *m = money_create (0, "USD"); + fail_unless (money_amount (m) == 0, + "Zero is a valid amount of money"); +} +END_TEST + +Suite * +money_suite (void) +{ + Suite *s = suite_create ("Money"); + + /* Core test case */ + TCase *tc_core = tcase_create ("Core"); + tcase_add_test (tc_core, test_money_create); + suite_add_tcase (s, tc_core); + + /* Limits test case */ + TCase *tc_limits = tcase_create ("Limits"); + tcase_add_test (tc_limits, test_money_create_neg); + tcase_add_test (tc_limits, test_money_create_zero); + suite_add_tcase (s, tc_limits); + + return s; +} + +int +main (void) +{ + int number_failed; + Suite *s = money_suite (); + SRunner *sr = srunner_create (s); + srunner_run_all (sr, CK_NORMAL); + number_failed = srunner_ntests_failed (sr); + srunner_free (sr); + return (number_failed == 0) ? 
EXIT_SUCCESS : EXIT_FAILURE; +} diff --git a/doc/example/tests/check_money.7.c b/doc/example/tests/check_money.7.c new file mode 100644 index 0000000..b935998 --- /dev/null +++ b/doc/example/tests/check_money.7.c @@ -0,0 +1,75 @@ +#include <stdlib.h> +#include <check.h> +#include "../src/money.h" + +Money *five_dollars; + +void +setup (void) +{ + five_dollars = money_create (5, "USD"); +} + +void +teardown (void) +{ + money_free (five_dollars); +} + +START_TEST (test_money_create) +{ + fail_unless (money_amount (five_dollars) == 5, + "Amount not set correctly on creation"); + fail_unless (strcmp (money_currency (five_dollars), "USD") == 0, + "Currency not set correctly on creation"); +} +END_TEST + +START_TEST (test_money_create_neg) +{ + Money *m = money_create (-1, "USD"); + fail_unless (m == NULL, + "NULL should be returned on attempt to create with " + "a negative amount"); +} +END_TEST + +START_TEST (test_money_create_zero) +{ + Money *m = money_create (0, "USD"); + fail_unless (money_amount (m) == 0, + "Zero is a valid amount of money"); +} +END_TEST + +Suite * +money_suite (void) +{ + Suite *s = suite_create ("Money"); + + /* Core test case */ + TCase *tc_core = tcase_create ("Core"); + tcase_add_checked_fixture (tc_core, setup, teardown); + tcase_add_test (tc_core, test_money_create); + suite_add_tcase (s, tc_core); + + /* Limits test case */ + TCase *tc_limits = tcase_create ("Limits"); + tcase_add_test (tc_limits, test_money_create_neg); + tcase_add_test (tc_limits, test_money_create_zero); + suite_add_tcase (s, tc_limits); + + return s; +} + +int +main (void) +{ + int number_failed; + Suite *s = money_suite (); + SRunner *sr = srunner_create (s); + srunner_run_all (sr, CK_NORMAL); + number_failed = srunner_ntests_failed (sr); + srunner_free (sr); + return (number_failed == 0) ? 
EXIT_SUCCESS : EXIT_FAILURE; +} diff --git a/doc/example/tests/check_money.c b/doc/example/tests/check_money.c new file mode 100644 index 0000000..b935998 --- /dev/null +++ b/doc/example/tests/check_money.c @@ -0,0 +1,75 @@ +#include <stdlib.h> +#include <check.h> +#include "../src/money.h" + +Money *five_dollars; + +void +setup (void) +{ + five_dollars = money_create (5, "USD"); +} + +void +teardown (void) +{ + money_free (five_dollars); +} + +START_TEST (test_money_create) +{ + fail_unless (money_amount (five_dollars) == 5, + "Amount not set correctly on creation"); + fail_unless (strcmp (money_currency (five_dollars), "USD") == 0, + "Currency not set correctly on creation"); +} +END_TEST + +START_TEST (test_money_create_neg) +{ + Money *m = money_create (-1, "USD"); + fail_unless (m == NULL, + "NULL should be returned on attempt to create with " + "a negative amount"); +} +END_TEST + +START_TEST (test_money_create_zero) +{ + Money *m = money_create (0, "USD"); + fail_unless (money_amount (m) == 0, + "Zero is a valid amount of money"); +} +END_TEST + +Suite * +money_suite (void) +{ + Suite *s = suite_create ("Money"); + + /* Core test case */ + TCase *tc_core = tcase_create ("Core"); + tcase_add_checked_fixture (tc_core, setup, teardown); + tcase_add_test (tc_core, test_money_create); + suite_add_tcase (s, tc_core); + + /* Limits test case */ + TCase *tc_limits = tcase_create ("Limits"); + tcase_add_test (tc_limits, test_money_create_neg); + tcase_add_test (tc_limits, test_money_create_zero); + suite_add_tcase (s, tc_limits); + + return s; +} + +int +main (void) +{ + int number_failed; + Suite *s = money_suite (); + SRunner *sr = srunner_create (s); + srunner_run_all (sr, CK_NORMAL); + number_failed = srunner_ntests_failed (sr); + srunner_free (sr); + return (number_failed == 0) ? 
EXIT_SUCCESS : EXIT_FAILURE; +} diff --git a/doc/fdl.texi b/doc/fdl.texi new file mode 100644 index 0000000..fe78df8 --- /dev/null +++ b/doc/fdl.texi @@ -0,0 +1,452 @@ + +@node GNU Free Documentation License +@appendixsec GNU Free Documentation License + +@cindex FDL, GNU Free Documentation License +@center Version 1.2, November 2002 + +@display +Copyright @copyright{} 2000,2001,2002 Free Software Foundation, Inc. +51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. +@end display + +@enumerate 0 +@item +PREAMBLE + +The purpose of this License is to make a manual, textbook, or other +functional and useful document @dfn{free} in the sense of freedom: to +assure everyone the effective freedom to copy and redistribute it, +with or without modifying it, either commercially or noncommercially. +Secondarily, this License preserves for the author and publisher a way +to get credit for their work, while not being considered responsible +for modifications made by others. + +This License is a kind of ``copyleft'', which means that derivative +works of the document must themselves be free in the same sense. It +complements the GNU General Public License, which is a copyleft +license designed for free software. + +We have designed this License in order to use it for manuals for free +software, because free software needs free documentation: a free +program should come with manuals providing the same freedoms that the +software does. But this License is not limited to software manuals; +it can be used for any textual work, regardless of subject matter or +whether it is published as a printed book. We recommend this License +principally for works whose purpose is instruction or reference. 
+ +@item +APPLICABILITY AND DEFINITIONS + +This License applies to any manual or other work, in any medium, that +contains a notice placed by the copyright holder saying it can be +distributed under the terms of this License. Such a notice grants a +world-wide, royalty-free license, unlimited in duration, to use that +work under the conditions stated herein. The ``Document'', below, +refers to any such manual or work. Any member of the public is a +licensee, and is addressed as ``you''. You accept the license if you +copy, modify or distribute the work in a way requiring permission +under copyright law. + +A ``Modified Version'' of the Document means any work containing the +Document or a portion of it, either copied verbatim, or with +modifications and/or translated into another language. + +A ``Secondary Section'' is a named appendix or a front-matter section +of the Document that deals exclusively with the relationship of the +publishers or authors of the Document to the Document's overall +subject (or to related matters) and contains nothing that could fall +directly within that overall subject. (Thus, if the Document is in +part a textbook of mathematics, a Secondary Section may not explain +any mathematics.) The relationship could be a matter of historical +connection with the subject or with related matters, or of legal, +commercial, philosophical, ethical or political position regarding +them. + +The ``Invariant Sections'' are certain Secondary Sections whose titles +are designated, as being those of Invariant Sections, in the notice +that says that the Document is released under this License. If a +section does not fit the above definition of Secondary then it is not +allowed to be designated as Invariant. The Document may contain zero +Invariant Sections. If the Document does not identify any Invariant +Sections then there are none. 
+ +The ``Cover Texts'' are certain short passages of text that are listed, +as Front-Cover Texts or Back-Cover Texts, in the notice that says that +the Document is released under this License. A Front-Cover Text may +be at most 5 words, and a Back-Cover Text may be at most 25 words. + +A ``Transparent'' copy of the Document means a machine-readable copy, +represented in a format whose specification is available to the +general public, that is suitable for revising the document +straightforwardly with generic text editors or (for images composed of +pixels) generic paint programs or (for drawings) some widely available +drawing editor, and that is suitable for input to text formatters or +for automatic translation to a variety of formats suitable for input +to text formatters. A copy made in an otherwise Transparent file +format whose markup, or absence of markup, has been arranged to thwart +or discourage subsequent modification by readers is not Transparent. +An image format is not Transparent if used for any substantial amount +of text. A copy that is not ``Transparent'' is called ``Opaque''. + +Examples of suitable formats for Transparent copies include plain +@sc{ascii} without markup, Texinfo input format, La@TeX{} input +format, @acronym{SGML} or @acronym{XML} using a publicly available +@acronym{DTD}, and standard-conforming simple @acronym{HTML}, +PostScript or @acronym{PDF} designed for human modification. Examples +of transparent image formats include @acronym{PNG}, @acronym{XCF} and +@acronym{JPG}. Opaque formats include proprietary formats that can be +read and edited only by proprietary word processors, @acronym{SGML} or +@acronym{XML} for which the @acronym{DTD} and/or processing tools are +not generally available, and the machine-generated @acronym{HTML}, +PostScript or @acronym{PDF} produced by some word processors for +output purposes only. 
+ +The ``Title Page'' means, for a printed book, the title page itself, +plus such following pages as are needed to hold, legibly, the material +this License requires to appear in the title page. For works in +formats which do not have any title page as such, ``Title Page'' means +the text near the most prominent appearance of the work's title, +preceding the beginning of the body of the text. + +A section ``Entitled XYZ'' means a named subunit of the Document whose +title either is precisely XYZ or contains XYZ in parentheses following +text that translates XYZ in another language. (Here XYZ stands for a +specific section name mentioned below, such as ``Acknowledgements'', +``Dedications'', ``Endorsements'', or ``History''.) To ``Preserve the Title'' +of such a section when you modify the Document means that it remains a +section ``Entitled XYZ'' according to this definition. + +The Document may include Warranty Disclaimers next to the notice which +states that this License applies to the Document. These Warranty +Disclaimers are considered to be included by reference in this +License, but only as regards disclaiming warranties: any other +implication that these Warranty Disclaimers may have is void and has +no effect on the meaning of this License. + +@item +VERBATIM COPYING + +You may copy and distribute the Document in any medium, either +commercially or noncommercially, provided that this License, the +copyright notices, and the license notice saying this License applies +to the Document are reproduced in all copies, and that you add no other +conditions whatsoever to those of this License. You may not use +technical measures to obstruct or control the reading or further +copying of the copies you make or distribute. However, you may accept +compensation in exchange for copies. If you distribute a large enough +number of copies you must also follow the conditions in section 3. 
+ +You may also lend copies, under the same conditions stated above, and +you may publicly display copies. + +@item +COPYING IN QUANTITY + +If you publish printed copies (or copies in media that commonly have +printed covers) of the Document, numbering more than 100, and the +Document's license notice requires Cover Texts, you must enclose the +copies in covers that carry, clearly and legibly, all these Cover +Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on +the back cover. Both covers must also clearly and legibly identify +you as the publisher of these copies. The front cover must present +the full title with all words of the title equally prominent and +visible. You may add other material on the covers in addition. +Copying with changes limited to the covers, as long as they preserve +the title of the Document and satisfy these conditions, can be treated +as verbatim copying in other respects. + +If the required texts for either cover are too voluminous to fit +legibly, you should put the first ones listed (as many as fit +reasonably) on the actual cover, and continue the rest onto adjacent +pages. + +If you publish or distribute Opaque copies of the Document numbering +more than 100, you must either include a machine-readable Transparent +copy along with each Opaque copy, or state in or with each Opaque copy +a computer-network location from which the general network-using +public has access to download using public-standard network protocols +a complete Transparent copy of the Document, free of added material. +If you use the latter option, you must take reasonably prudent steps, +when you begin distribution of Opaque copies in quantity, to ensure +that this Transparent copy will remain thus accessible at the stated +location until at least one year after the last time you distribute an +Opaque copy (directly or through your agents or retailers) of that +edition to the public. 
+ +It is requested, but not required, that you contact the authors of the +Document well before redistributing any large number of copies, to give +them a chance to provide you with an updated version of the Document. + +@item +MODIFICATIONS + +You may copy and distribute a Modified Version of the Document under +the conditions of sections 2 and 3 above, provided that you release +the Modified Version under precisely this License, with the Modified +Version filling the role of the Document, thus licensing distribution +and modification of the Modified Version to whoever possesses a copy +of it. In addition, you must do these things in the Modified Version: + +@enumerate A +@item +Use in the Title Page (and on the covers, if any) a title distinct +from that of the Document, and from those of previous versions +(which should, if there were any, be listed in the History section +of the Document). You may use the same title as a previous version +if the original publisher of that version gives permission. + +@item +List on the Title Page, as authors, one or more persons or entities +responsible for authorship of the modifications in the Modified +Version, together with at least five of the principal authors of the +Document (all of its principal authors, if it has fewer than five), +unless they release you from this requirement. + +@item +State on the Title page the name of the publisher of the +Modified Version, as the publisher. + +@item +Preserve all the copyright notices of the Document. + +@item +Add an appropriate copyright notice for your modifications +adjacent to the other copyright notices. + +@item +Include, immediately after the copyright notices, a license notice +giving the public permission to use the Modified Version under the +terms of this License, in the form shown in the Addendum below. + +@item +Preserve in that license notice the full lists of Invariant Sections +and required Cover Texts given in the Document's license notice. 
+ +@item +Include an unaltered copy of this License. + +@item +Preserve the section Entitled ``History'', Preserve its Title, and add +to it an item stating at least the title, year, new authors, and +publisher of the Modified Version as given on the Title Page. If +there is no section Entitled ``History'' in the Document, create one +stating the title, year, authors, and publisher of the Document as +given on its Title Page, then add an item describing the Modified +Version as stated in the previous sentence. + +@item +Preserve the network location, if any, given in the Document for +public access to a Transparent copy of the Document, and likewise +the network locations given in the Document for previous versions +it was based on. These may be placed in the ``History'' section. +You may omit a network location for a work that was published at +least four years before the Document itself, or if the original +publisher of the version it refers to gives permission. + +@item +For any section Entitled ``Acknowledgements'' or ``Dedications'', Preserve +the Title of the section, and preserve in the section all the +substance and tone of each of the contributor acknowledgements and/or +dedications given therein. + +@item +Preserve all the Invariant Sections of the Document, +unaltered in their text and in their titles. Section numbers +or the equivalent are not considered part of the section titles. + +@item +Delete any section Entitled ``Endorsements''. Such a section +may not be included in the Modified Version. + +@item +Do not retitle any existing section to be Entitled ``Endorsements'' or +to conflict in title with any Invariant Section. + +@item +Preserve any Warranty Disclaimers. +@end enumerate + +If the Modified Version includes new front-matter sections or +appendices that qualify as Secondary Sections and contain no material +copied from the Document, you may at your option designate some or all +of these sections as invariant. 
To do this, add their titles to the +list of Invariant Sections in the Modified Version's license notice. +These titles must be distinct from any other section titles. + +You may add a section Entitled ``Endorsements'', provided it contains +nothing but endorsements of your Modified Version by various +parties---for example, statements of peer review or that the text has +been approved by an organization as the authoritative definition of a +standard. + +You may add a passage of up to five words as a Front-Cover Text, and a +passage of up to 25 words as a Back-Cover Text, to the end of the list +of Cover Texts in the Modified Version. Only one passage of +Front-Cover Text and one of Back-Cover Text may be added by (or +through arrangements made by) any one entity. If the Document already +includes a cover text for the same cover, previously added by you or +by arrangement made by the same entity you are acting on behalf of, +you may not add another; but you may replace the old one, on explicit +permission from the previous publisher that added the old one. + +The author(s) and publisher(s) of the Document do not by this License +give permission to use their names for publicity for or to assert or +imply endorsement of any Modified Version. + +@item +COMBINING DOCUMENTS + +You may combine the Document with other documents released under this +License, under the terms defined in section 4 above for modified +versions, provided that you include in the combination all of the +Invariant Sections of all of the original documents, unmodified, and +list them all as Invariant Sections of your combined work in its +license notice, and that you preserve all their Warranty Disclaimers. + +The combined work need only contain one copy of this License, and +multiple identical Invariant Sections may be replaced with a single +copy. 
If there are multiple Invariant Sections with the same name but +different contents, make the title of each such section unique by +adding at the end of it, in parentheses, the name of the original +author or publisher of that section if known, or else a unique number. +Make the same adjustment to the section titles in the list of +Invariant Sections in the license notice of the combined work. + +In the combination, you must combine any sections Entitled ``History'' +in the various original documents, forming one section Entitled +``History''; likewise combine any sections Entitled ``Acknowledgements'', +and any sections Entitled ``Dedications''. You must delete all +sections Entitled ``Endorsements.'' + +@item +COLLECTIONS OF DOCUMENTS + +You may make a collection consisting of the Document and other documents +released under this License, and replace the individual copies of this +License in the various documents with a single copy that is included in +the collection, provided that you follow the rules of this License for +verbatim copying of each of the documents in all other respects. + +You may extract a single document from such a collection, and distribute +it individually under this License, provided you insert a copy of this +License into the extracted document, and follow this License in all +other respects regarding verbatim copying of that document. + +@item +AGGREGATION WITH INDEPENDENT WORKS + +A compilation of the Document or its derivatives with other separate +and independent documents or works, in or on a volume of a storage or +distribution medium, is called an ``aggregate'' if the copyright +resulting from the compilation is not used to limit the legal rights +of the compilation's users beyond what the individual works permit. +When the Document is included in an aggregate, this License does not +apply to the other works in the aggregate which are not themselves +derivative works of the Document. 
+ +If the Cover Text requirement of section 3 is applicable to these +copies of the Document, then if the Document is less than one half of +the entire aggregate, the Document's Cover Texts may be placed on +covers that bracket the Document within the aggregate, or the +electronic equivalent of covers if the Document is in electronic form. +Otherwise they must appear on printed covers that bracket the whole +aggregate. + +@item +TRANSLATION + +Translation is considered a kind of modification, so you may +distribute translations of the Document under the terms of section 4. +Replacing Invariant Sections with translations requires special +permission from their copyright holders, but you may include +translations of some or all Invariant Sections in addition to the +original versions of these Invariant Sections. You may include a +translation of this License, and all the license notices in the +Document, and any Warranty Disclaimers, provided that you also include +the original English version of this License and the original versions +of those notices and disclaimers. In case of a disagreement between +the translation and the original version of this License or a notice +or disclaimer, the original version will prevail. + +If a section in the Document is Entitled ``Acknowledgements'', +``Dedications'', or ``History'', the requirement (section 4) to Preserve +its Title (section 1) will typically require changing the actual +title. + +@item +TERMINATION + +You may not copy, modify, sublicense, or distribute the Document except +as expressly provided for under this License. Any other attempt to +copy, modify, sublicense or distribute the Document is void, and will +automatically terminate your rights under this License. However, +parties who have received copies, or rights, from you under this +License will not have their licenses terminated so long as such +parties remain in full compliance. 
+ +@item +FUTURE REVISIONS OF THIS LICENSE + +The Free Software Foundation may publish new, revised versions +of the GNU Free Documentation License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. See +@uref{http://www.gnu.org/copyleft/}. + +Each version of the License is given a distinguishing version number. +If the Document specifies that a particular numbered version of this +License ``or any later version'' applies to it, you have the option of +following the terms and conditions either of that specified version or +of any later version that has been published (not as a draft) by the +Free Software Foundation. If the Document does not specify a version +number of this License, you may choose any version ever published (not +as a draft) by the Free Software Foundation. +@end enumerate + +@page +@appendixsubsec ADDENDUM: How to use this License for your documents + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and +license notices just after the title page: + +@smallexample +@group + Copyright (C) @var{year} @var{your name}. + Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.2 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover + Texts. A copy of the license is included in the section entitled ``GNU + Free Documentation License''. +@end group +@end smallexample + +If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, +replace the ``with...Texts.'' line with this: + +@smallexample +@group + with the Invariant Sections being @var{list their titles}, with + the Front-Cover Texts being @var{list}, and with the Back-Cover Texts + being @var{list}. 
+@end group +@end smallexample + +If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + +If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of +free software license, such as the GNU General Public License, +to permit their use in free software. + +@c Local Variables: +@c ispell-local-pdict: "ispell-dict" +@c End: + diff --git a/doc/stamp-vti b/doc/stamp-vti new file mode 100644 index 0000000..303f134 --- /dev/null +++ b/doc/stamp-vti @@ -0,0 +1,4 @@ +@set UPDATED 23 September 2009 +@set UPDATED-MONTH September 2009 +@set EDITION 0.9.8 +@set VERSION 0.9.8 diff --git a/doc/version.texi b/doc/version.texi new file mode 100644 index 0000000..303f134 --- /dev/null +++ b/doc/version.texi @@ -0,0 +1,4 @@ +@set UPDATED 23 September 2009 +@set UPDATED-MONTH September 2009 +@set EDITION 0.9.8 +@set VERSION 0.9.8 |