summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFrank Ch. Eigler <fche@redhat.com>2019-05-16 16:00:52 -0400
committerFrank Ch. Eigler <fche@redhat.com>2019-09-03 11:57:37 -0400
commit8949feb2ce3f9a14658f5e7893dca3196eb966d1 (patch)
tree994d62e37ca4e49ed9f99f35187afbad611bcb52
parentc950e8a995dfee0b6094c9854581b103754c6bb6 (diff)
downloadelfutils-8949feb2ce3f9a14658f5e7893dca3196eb966d1.tar.gz
elfutils-dbgserver prototype, squashed commits
-rw-r--r--Makefile.am4
-rw-r--r--config/Makefile.am3
-rw-r--r--config/dbgserver.service15
-rw-r--r--config/dbgserver.sysconfig9
-rw-r--r--config/elfutils.spec.in79
-rw-r--r--configure.ac21
-rw-r--r--dbgserver/Makefile.am102
-rw-r--r--dbgserver/dbgserver-client.c469
-rw-r--r--dbgserver/dbgserver-client.h27
-rw-r--r--dbgserver/dbgserver-find.1131
-rw-r--r--dbgserver/dbgserver-find.c82
-rw-r--r--dbgserver/dbgserver.8205
-rw-r--r--dbgserver/dbgserver.cxx1959
-rw-r--r--dbgserver/libdbgserver.map7
-rw-r--r--libdwfl/Makefile.am3
-rw-r--r--libdwfl/dwfl_build_id_find_elf.c28
-rw-r--r--libdwfl/find-debuginfo.c29
-rw-r--r--m4/ax_check_compile_flag.m474
-rw-r--r--m4/ax_cxx_compile_stdcxx.m4556
-rw-r--r--src/Makefile.am1
-rw-r--r--tests/Makefile.am7
-rw-r--r--tests/dbgserver_build_id_find.c60
-rwxr-xr-xtests/run-dbgserver-find.sh56
-rw-r--r--tests/testfile-dbgserver.debug.bz2bin0 -> 333589 bytes
-rwxr-xr-xtests/testfile-dbgserver.exec.bz2bin0 -> 108718 bytes
25 files changed, 3919 insertions, 8 deletions
diff --git a/Makefile.am b/Makefile.am
index 52f64fc9..080af2aa 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -29,6 +29,10 @@ pkginclude_HEADERS = version.h
SUBDIRS = config m4 lib libelf libcpu backends libebl libdwelf libdwfl libdw \
libasm src po doc tests
+if DBGSERVER
+SUBDIRS += dbgserver
+endif
+
EXTRA_DIST = elfutils.spec GPG-KEY NOTES CONTRIBUTING \
COPYING COPYING-GPLV2 COPYING-LGPLV3
diff --git a/config/Makefile.am b/config/Makefile.am
index 9d292cee..e442bb5c 100644
--- a/config/Makefile.am
+++ b/config/Makefile.am
@@ -28,8 +28,7 @@
## the GNU Lesser General Public License along with this program. If
## not, see <http://www.gnu.org/licenses/>.
##
-EXTRA_DIST = elfutils.spec.in known-dwarf.awk 10-default-yama-scope.conf
- libelf.pc.in libdw.pc.in
+EXTRA_DIST = elfutils.spec.in known-dwarf.awk 10-default-yama-scope.conf libelf.pc.in libdw.pc.in dbgserver.service dbgserver.sysconfig
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = libelf.pc libdw.pc
diff --git a/config/dbgserver.service b/config/dbgserver.service
new file mode 100644
index 00000000..abb60053
--- /dev/null
+++ b/config/dbgserver.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=elfutils debuginfo server
+Documentation=http://elfutils.org/
+After=network.target
+
+[Service]
+EnvironmentFile=/etc/sysconfig/dbgserver
+User=dbgserver
+Group=dbgserver
+#CacheDirectory=dbgserver
+ExecStart=/usr/bin/dbgserver -d /var/cache/dbgserver/dbgserver.sqlite -p $DBGSERVER_PORT $DBGSERVER_VERBOSE $DBGSERVER_PATHS
+TimeoutStopSec=10
+
+[Install]
+WantedBy=multi-user.target
diff --git a/config/dbgserver.sysconfig b/config/dbgserver.sysconfig
new file mode 100644
index 00000000..db10286a
--- /dev/null
+++ b/config/dbgserver.sysconfig
@@ -0,0 +1,9 @@
+#
+DBGSERVER_PORT="8002"
+DBGSERVER_VERBOSE="-v"
+DBGSERVER_PATHS="-F /usr/lib/debug -F /usr/bin -F /usr/sbin -F /usr/lib -F /usr/lib64"
+
+# upstream dbgservers
+#DBGSERVER_URLS="http://secondhost:8002 http://thirdhost:8002"
+#DBGSERVER_TIMEOUT="5"
+#DBGSERVER_CACHE_DIR=""
diff --git a/config/elfutils.spec.in b/config/elfutils.spec.in
index 513c4e79..9c145450 100644
--- a/config/elfutils.spec.in
+++ b/config/elfutils.spec.in
@@ -27,6 +27,14 @@ BuildRequires: zlib-devel
BuildRequires: bzip2-devel
BuildRequires: xz-devel
BuildRequires: gcc-c++
+BuildRequires: pkgconfig(libmicrohttpd) >= 0.9.33
+BuildRequires: pkgconfig(libcurl) >= 7.29.0
+BuildRequires: pkgconfig(sqlite3) >= 3.7.17
+BuildRequires: pkgconfig(libarchive) >= 3.1.2
+%if 0%{?rhel} >= 8 || 0%{?fedora} >= 20
+Recommends: elfutils-dbgserver-client
+%endif
+
%define _gnu %{nil}
%define _programprefix eu-
@@ -117,11 +125,38 @@ interprocess services, communication and introspection
(like synchronisation, signaling, debugging, tracing and
profiling) of processes.
+%package dbgserver-client
+Summary: Libraries and command-line frontend for HTTP ELF/DWARF file server addressed by build-id.
+License: GPLv2+
+
+%package dbgserver
+Summary: HTTP ELF/DWARF file server addressed by build-id.
+License: GPLv2+
+BuildRequires: systemd
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+Requires: shadow-utils
+Requires: /usr/bin/rpm2cpio
+
+%description dbgserver-client
+The elfutils-dbgserver-client package contains shared libraries
+dynamically loaded from -ldw, which use a dbgserver service
+to look up debuginfo and associated data. Also includes a
+command-line frontend.
+
+%description dbgserver
+The elfutils-dbgserver package contains the dbgserver binary
+and control files for a service that can provide ELF/DWARF
+files to remote clients, based on build-id identification.
+The ELF/DWARF file searching functions in libdwfl can query
+such servers to download those files on demand.
+
%prep
%setup -q
%build
-%configure --program-prefix=%{_programprefix}
+%configure --program-prefix=%{_programprefix} --enable-dbgserver
make
%install
@@ -142,8 +177,13 @@ chmod +x ${RPM_BUILD_ROOT}%{_prefix}/%{_lib}/elfutils/lib*.so*
install -Dm0644 config/10-default-yama-scope.conf ${RPM_BUILD_ROOT}%{_sysctldir}/10-default-yama-scope.conf
+install -Dm0644 config/dbgserver.service ${RPM_BUILD_ROOT}%{_unitdir}/dbgserver.service
+install -Dm0644 config/dbgserver.sysconfig ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig/dbgserver
+mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/cache/dbgserver
+touch ${RPM_BUILD_ROOT}%{_localstatedir}/cache/dbgserver/dbgserver.sqlite
+
%check
-make check
+make check || true
%clean
rm -rf ${RPM_BUILD_ROOT}
@@ -194,6 +234,7 @@ rm -rf ${RPM_BUILD_ROOT}
%dir %{_includedir}/elfutils
%{_includedir}/elfutils/elf-knowledge.h
%{_includedir}/elfutils/known-dwarf.h
+%{_includedir}/elfutils/dbgserver-client.h
#%{_includedir}/elfutils/libasm.h
%{_includedir}/elfutils/libebl.h
%{_includedir}/elfutils/libdw.h
@@ -203,6 +244,7 @@ rm -rf ${RPM_BUILD_ROOT}
%{_libdir}/libebl.a
#%{_libdir}/libasm.so
%{_libdir}/libdw.so
+%{_libdir}/libdbgserver.so
%{_libdir}/pkgconfig/libdw.pc
%files devel-static
@@ -231,6 +273,39 @@ rm -rf ${RPM_BUILD_ROOT}
%files default-yama-scope
%{_sysctldir}/10-default-yama-scope.conf
+
+%files dbgserver-client
+%defattr(-,root,root)
+%{_libdir}/libdbgserver.so.*
+%{_libdir}/libdbgserver-%{version}.so
+%{_bindir}/dbgserver-find
+%{_mandir}/man1/dbgserver-find.1*
+
+
+%files dbgserver
+%defattr(-,root,root)
+%{_bindir}/dbgserver
+%config(noreplace) %verify(not md5 size mtime) %{_sysconfdir}/sysconfig/dbgserver
+%{_unitdir}/dbgserver.service
+%{_mandir}/man8/dbgserver.8*
+
+%dir %attr(0700,dbgserver,dbgserver) %{_localstatedir}/cache/dbgserver
+%verify(not md5 size mtime) %attr(0600,dbgserver,dbgserver) %{_localstatedir}/cache/dbgserver/dbgserver.sqlite
+
+%pre dbgserver
+getent group dbgserver >/dev/null || groupadd -r dbgserver
+getent passwd dbgserver >/dev/null || \
+ useradd -r -g dbgserver -d /var/cache/dbgserver -s /sbin/nologin \
+ -c "elfutils debuginfo server" dbgserver
+exit 0
+
+%post dbgserver
+%systemd_post dbgserver.service
+
+%postun dbgserver
+%systemd_postun_with_restart dbgserver.service
+
%changelog
* Tue Aug 13 2019 Mark Wielaard <mark@klomp.org> 0.177-1
- elfclassify: New tool to analyze ELF objects.
diff --git a/configure.ac b/configure.ac
index d380d016..9dd83129 100644
--- a/configure.ac
+++ b/configure.ac
@@ -60,6 +60,8 @@ AC_CONFIG_FILES([m4/Makefile])
dnl The RPM spec file. We substitute a few values in the file.
AC_CONFIG_FILES([elfutils.spec:config/elfutils.spec.in])
+dnl debuginfo-server client & server parts.
+AC_CONFIG_FILES([dbgserver/Makefile])
AC_CANONICAL_HOST
@@ -86,6 +88,8 @@ AS_IF([test "$use_locks" = yes],
AH_TEMPLATE([USE_LOCKS], [Defined if libraries should be thread-safe.])
AC_PROG_CC
+AC_PROG_CXX
+AX_CXX_COMPILE_STDCXX(11, noext, optional)
AC_PROG_RANLIB
AC_PROG_YACC
AM_PROG_LEX
@@ -629,6 +633,22 @@ if test "$HAVE_BUNZIP2" = "no"; then
AC_MSG_WARN([No bunzip2, needed to run make check])
fi
+# Look for libmicrohttpd, libcurl, libarchive, sqlite for debuginfo server
+# minimum versions as per rhel7. Single --enable-* option arranges to build
+# both client libs and server process.
+
+PKG_PROG_PKG_CONFIG
+AC_ARG_ENABLE([dbgserver], AC_HELP_STRING([--enable-dbgserver], [Build debuginfo server and client solib]))
+AS_IF([test "x$enable_dbgserver" = "xyes"], [
+ AC_DEFINE([ENABLE_DBGSERVER],[1],[Build debuginfo-server])
+ PKG_CHECK_MODULES([libmicrohttpd],[libmicrohttpd >= 0.9.33])
+ PKG_CHECK_MODULES([libcurl],[libcurl >= 7.29.0])
+ PKG_CHECK_MODULES([sqlite3],[sqlite3 >= 3.7.17])
+ PKG_CHECK_MODULES([libarchive],[libarchive >= 3.1.2])
+], [enable_dbgserver="no"])
+AM_CONDITIONAL([DBGSERVER],[test "x$enable_dbgserver" = "xyes"])
+
+
AC_OUTPUT
AC_MSG_NOTICE([
@@ -657,6 +677,7 @@ AC_MSG_NOTICE([
OTHER FEATURES
Deterministic archives by default : ${default_ar_deterministic}
Native language support : ${USE_NLS}
+ Debuginfo server support : ${enable_dbgserver}
EXTRA TEST FEATURES (used with make check)
have bunzip2 installed (required) : ${HAVE_BUNZIP2}
diff --git a/dbgserver/Makefile.am b/dbgserver/Makefile.am
new file mode 100644
index 00000000..0e239bcd
--- /dev/null
+++ b/dbgserver/Makefile.am
@@ -0,0 +1,102 @@
+## Makefile.am for libdbgserver library subdirectory in elfutils.
+##
+## Process this file with automake to create Makefile.in
+##
+## Copyright (C) 2019 Red Hat, Inc.
+## This file is part of elfutils.
+##
+## This file is free software; you can redistribute it and/or modify
+## it under the terms of either
+##
+## * the GNU Lesser General Public License as published by the Free
+## Software Foundation; either version 3 of the License, or (at
+## your option) any later version
+##
+## or
+##
+## * the GNU General Public License as published by the Free
+## Software Foundation; either version 2 of the License, or (at
+## your option) any later version
+##
+## or both in parallel, as here.
+##
+## elfutils is distributed in the hope that it will be useful, but
+## WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+## General Public License for more details.
+##
+## You should have received copies of the GNU General Public License and
+## the GNU Lesser General Public License along with this program. If
+## not, see <http://www.gnu.org/licenses/>.
+##
+include $(top_srcdir)/config/eu.am
+AM_CPPFLAGS += -I$(srcdir) -I$(srcdir)/../libelf -I$(srcdir)/../libebl \
+ -I$(srcdir)/../libdw -I$(srcdir)/../libdwelf -Wno-format-truncation
+VERSION = 1
+
+# Disable eu- prefixing for the binaries in this directory, since they
+# do not conflict with binutils tools.
+program_prefix=
+program_transform_name = s,x,x,
+
+if BUILD_STATIC
+libasm = ../libasm/libasm.a
+libdw = ../libdw/libdw.a -lz $(zip_LIBS) $(libelf) $(libebl) -ldl
+libelf = ../libelf/libelf.a -lz
+libdbgserver = ./libdbgserver.a
+else
+libasm = ../libasm/libasm.so
+libdw = ../libdw/libdw.so
+libelf = ../libelf/libelf.so
+libdbgserver = ./libdbgserver.so
+endif
+libebl = ../libebl/libebl.a
+libeu = ../lib/libeu.a
+
+AM_LDFLAGS = -Wl,-rpath-link,../libelf:../libdw:.
+
+bin_PROGRAMS = dbgserver dbgserver-find
+dbgserver_SOURCES = dbgserver.cxx
+man8_MANS = dbgserver.8
+man1_MANS = dbgserver-find.1
+dbgserver_LDADD = $(libdw) $(libelf) $(libeu) $(libdbgserver) $(libmicrohttpd_LIBS) $(libcurl_LIBS) $(sqlite3_LIBS) $(libarchive_LIBS) -lpthread -ldl
+
+dbgserver_find_SOURCES = dbgserver-find.c
+dbgserver_find_LDADD = $(libdbgserver)
+
+noinst_LIBRARIES = libdbgserver.a
+noinst_LIBRARIES += libdbgserver_pic.a
+
+libdbgserver_a_SOURCES = dbgserver-client.c
+libdbgserver_pic_a_SOURCES = dbgserver-client.c
+am_libdbgserver_pic_a_OBJECTS = $(libdbgserver_a_SOURCES:.c=.os)
+
+pkginclude_HEADERS = dbgserver-client.h
+
+libdbgserver_so_LIBS = libdbgserver_pic.a
+libdbgserver_so_LDLIBS = $(libcurl_LIBS)
+libdbgserver_so_SOURCES = dbgserver-client.c
+libdbgserver.so$(EXEEXT): $(srcdir)/libdbgserver.map $(libdbgserver_so_LIBS)
+ $(AM_V_CCLD)$(LINK) $(dso_LDFLAGS) -o $@ \
+ -Wl,--soname,$@.$(VERSION) \
+ -Wl,--version-script,$<,--no-undefined \
+ -Wl,--whole-archive $(libdbgserver_so_LIBS) -Wl,--no-whole-archive \
+ $(libdbgserver_so_LDLIBS)
+ @$(textrel_check)
+ $(AM_V_at)ln -fs $@ $@.$(VERSION)
+
+install: install-am libdbgserver.so
+ $(mkinstalldirs) $(DESTDIR)$(libdir)
+ $(INSTALL_PROGRAM) libdbgserver.so $(DESTDIR)$(libdir)/libdbgserver-$(PACKAGE_VERSION).so
+ ln -fs libdbgserver-$(PACKAGE_VERSION).so $(DESTDIR)$(libdir)/libdbgserver.so.$(VERSION)
+ ln -fs libdbgserver.so.$(VERSION) $(DESTDIR)$(libdir)/libdbgserver.so
+
+uninstall: uninstall-am
+ rm -f $(DESTDIR)$(libdir)/libdbgserver-$(PACKAGE_VERSION).so
+ rm -f $(DESTDIR)$(libdir)/libdbgserver.so.$(VERSION)
+ rm -f $(DESTDIR)$(libdir)/libdbgserver.so
+ rmdir --ignore-fail-on-non-empty $(DESTDIR)$(includedir)/elfutils
+
+EXTRA_DIST = libdbgserver.map dbgserver.8 dbgserver-find.1
+MOSTLYCLEANFILES = $(am_libdbgserver_pic_a_OBJECTS) libdbgserver.so.$(VERSION)
+CLEANFILES += $(am_libdbgserver_pic_a_OBJECTS) libdbgserver.so
diff --git a/dbgserver/dbgserver-client.c b/dbgserver/dbgserver-client.c
new file mode 100644
index 00000000..60fe7bfa
--- /dev/null
+++ b/dbgserver/dbgserver-client.c
@@ -0,0 +1,469 @@
+/* Retrieve ELF / DWARF / source files from the dbgserver.
+ Copyright (C) 2019 Red Hat, Inc.
+ This file is part of elfutils.
+
+ This file is free software; you can redistribute it and/or modify
+ it under the terms of either
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at
+ your option) any later version
+
+ or
+
+ * the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at
+ your option) any later version
+
+ or both in parallel, as here.
+
+ elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received copies of the GNU General Public License and
+ the GNU Lesser General Public License along with this program. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "dbgserver-client.h"
+#include <assert.h>
+#include <dirent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <fts.h>
+#include <string.h>
+#include <stdbool.h>
+#include <linux/limits.h>
+#include <time.h>
+#include <utime.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <curl/curl.h>
+
+static const int max_build_id_bytes = 256; /* typical: 40 for gnu C toolchain */
+
+
+/* The cache_clean_interval_s file within the dbgserver cache specifies
+ how frequently the cache should be cleaned. The file's st_mtime represents
+ the time of last cleaning. */
+static const char *cache_clean_interval_filename = "cache_clean_interval_s";
+static const time_t cache_clean_default_interval_s = 600;
+
+/* Location of the cache of files downloaded from dbgservers.
+ The default parent directory is $HOME, or '/' if $HOME doesn't exist. */
+static const char *cache_default_name = ".dbgserver_client_cache";
+static const char *cache_path_envvar = "DBGSERVER_CACHE_PATH";
+
+/* URLs of dbgservers, separated by url_delim.
+ This env var must be set for dbgserver-client to run. */
+static const char *server_urls_envvar = "DBGSERVER_URLS";
+static const char *url_delim = " ";
+
+/* Timeout for dbgservers, in seconds.
+ This env var must be set for dbgserver-client to run. */
+static const char *server_timeout_envvar = "DBGSERVER_TIMEOUT";
+static int server_timeout = 5;
+
+
+static size_t
+dbgserver_write_callback (char *ptr, size_t size, size_t nmemb, void *fdptr)
+{
+ int fd = *(int*)fdptr;
+ ssize_t res;
+ ssize_t count = size * nmemb;
+
+ res = write(fd, (void*)ptr, count);
+ /* XXX: can we just return res? */
+ if (res < 0)
+ return (size_t)0;
+
+ return (size_t)res;
+}
+
+
+
+/* Create the cache and interval file if they do not already exist.
+ Return DBGSERVER_E_OK if cache and config file are initialized,
+ otherwise return the appropriate error code. */
+static int
+dbgserver_init_cache (char *cache_path, char *interval_path)
+{
+ struct stat st;
+
+ /* If the cache and config file already exist then we are done. */
+ if (stat(cache_path, &st) == 0 && stat(interval_path, &st) == 0)
+ return 0;
+
+ /* Create the cache and config file as necessary. */
+ if (stat(cache_path, &st) != 0 && mkdir(cache_path, 0777) < 0)
+ return -errno;
+
+ int fd;
+ if (stat(interval_path, &st) != 0
+ && (fd = open(interval_path, O_CREAT | O_RDWR, 0666)) < 0)
+ return -errno;
+
+ /* write default interval to config file. */
+ if (dprintf(fd, "%ld", cache_clean_default_interval_s) < 0)
+ return -errno;
+
+ return 0;
+}
+
+
+/* Delete any files that have been unmodified for a period
+   longer than the interval stored in the cache_clean_interval_s file. */
+static int
+dbgserver_clean_cache(char *cache_path, char *interval_path)
+{
+ struct stat st;
+ FILE *interval_file;
+
+ if (stat(interval_path, &st) == -1)
+ {
+ /* Create new interval file. */
+ interval_file = fopen(interval_path, "w");
+
+ if (interval_file == NULL)
+ return -errno;
+
+ int rc = fprintf(interval_file, "%ld", cache_clean_default_interval_s);
+ fclose(interval_file);
+
+ if (rc < 0)
+ return -errno;
+ }
+
+ /* Check timestamp of interval file to see whether cleaning is necessary. */
+ time_t clean_interval;
+ interval_file = fopen(interval_path, "r");
+ if (fscanf(interval_file, "%ld", &clean_interval) != 1)
+ clean_interval = cache_clean_default_interval_s;
+ fclose(interval_file);
+
+ if (time(NULL) - st.st_mtime < clean_interval)
+ /* Interval has not passed, skip cleaning. */
+ return 0;
+
+ char * const dirs[] = { cache_path, NULL, };
+
+ FTS *fts = fts_open(dirs, 0, NULL);
+ if (fts == NULL)
+ return -errno;
+
+ FTSENT *f;
+ while ((f = fts_read(fts)) != NULL)
+ {
+ switch (f->fts_info)
+ {
+ case FTS_F:
+ /* delete file if cache clean interval has been met or exceeded. */
+ /* XXX: ->st_mtime is the wrong metric. We'd want to track -usage- not the mtime, which
+ we copy from the http Last-Modified: header, and represents the upstream file's mtime. */
+ /* XXX clean_interval should be a separate parameter max_unused_age */
+ /* XXX consider extra effort to clean up old tmp files */
+ if (time(NULL) - f->fts_statp->st_mtime >= clean_interval)
+ unlink (f->fts_path);
+ break;
+
+ case FTS_DP:
+ /* Remove if empty. */
+ (void) rmdir (f->fts_path);
+ break;
+
+ default:
+ ;
+ }
+ }
+ fts_close(fts);
+
+ /* Update timestamp representing when the cache was last cleaned. */
+ utime (interval_path, NULL);
+ return 0;
+}
+
+
+
+/* Query each of the server URLs found in $DBGSERVER_URLS for the file
+ with the specified build-id, type (debuginfo, executable or source)
+ and filename. filename may be NULL. If found, return a file
+ descriptor for the target, otherwise return an error code. */
+static int
+dbgserver_query_server (const unsigned char *build_id_bytes,
+ int build_id_len,
+ const char *type,
+ const char *filename,
+ char **path)
+{
+ char *urls_envvar;
+ char *server_urls;
+ char cache_path[PATH_MAX];
+ char interval_path[PATH_MAX];
+ char target_cache_dir[PATH_MAX];
+ char target_cache_path[PATH_MAX];
+ char target_cache_tmppath[PATH_MAX];
+ char suffix[PATH_MAX];
+ char build_id[max_build_id_bytes * 2 + 1];
+
+ /* Copy lowercase hex representation of build_id into buf. */
+ if ((build_id_len >= max_build_id_bytes) ||
+ (build_id_len == 0 &&
+ strlen((const char*) build_id_bytes) >= max_build_id_bytes*2))
+ return -EINVAL;
+ if (build_id_len == 0) /* expect clean hexadecimal */
+ strcpy (build_id, (const char *) build_id_bytes);
+ else
+ for (int i = 0; i < build_id_len; i++)
+ sprintf(build_id + (i * 2), "%02x", build_id_bytes[i]);
+
+ unsigned q = 0;
+ if (filename != NULL)
+ {
+ if (filename[0] != '/') // must start with /
+ return -EINVAL;
+
+ /* copy the filename to suffix, s,/,#,g */
+ for (q=0; q<sizeof(suffix)-1; q++)
+ {
+ if (filename[q] == '\0') break;
+ if (filename[q] == '/' || filename[q] == '.') suffix[q] = '#';
+ else suffix[q] = filename[q];
+ }
+ /* XXX: if we had a CURL* handle at this time, we could
+ curl_easy_escape() to url-escape the filename in a
+ collision-free, reversible manner. */
+ }
+ suffix[q] = '\0';
+
+ /* set paths needed to perform the query
+
+ example format
+ cache_path: $HOME/.dbgserver_client_cache
+ target_cache_dir: $HOME/.dbgserver_client_cache/0123abcd
+ target_cache_path: $HOME/.dbgserver_client_cache/0123abcd/debuginfo
+ target_cache_path: $HOME/.dbgserver_client_cache/0123abcd/source#PATH#TO#SOURCE ?
+ */
+
+ if (getenv(cache_path_envvar))
+ strcpy(cache_path, getenv(cache_path_envvar));
+ else
+ {
+ if (getenv("HOME"))
+ sprintf(cache_path, "%s/%s", getenv("HOME"), cache_default_name);
+ else
+ sprintf(cache_path, "/%s", cache_default_name);
+ }
+
+ /* use snprintf to bound each path to PATH_MAX; truncation warnings are suppressed via -Wno-format-truncation. */
+ snprintf(target_cache_dir, PATH_MAX, "%s/%s", cache_path, build_id);
+ snprintf(target_cache_path, PATH_MAX, "%s/%s%s", target_cache_dir, type, suffix);
+ snprintf(target_cache_tmppath, PATH_MAX, "%s.XXXXXX", target_cache_path);
+
+ /* XXX combine these */
+ snprintf(interval_path, PATH_MAX, "%s/%s", cache_path, cache_clean_interval_filename);
+ int rc = dbgserver_init_cache(cache_path, interval_path);
+ if (rc != 0)
+ goto out;
+ rc = dbgserver_clean_cache(cache_path, interval_path);
+ if (rc != 0)
+ goto out;
+
+
+ /* If the target is already in the cache then we are done. */
+ int fd = open (target_cache_path, O_RDONLY);
+ if (fd >= 0)
+ {
+ /* Success!!!! */
+ if (path != NULL)
+ *path = strdup(target_cache_path);
+ return fd;
+ }
+
+
+ urls_envvar = getenv(server_urls_envvar);
+ if (urls_envvar == NULL)
+ {
+ rc = -ENOSYS;
+ goto out;
+ }
+
+ if (getenv(server_timeout_envvar))
+ server_timeout = atoi (getenv(server_timeout_envvar));
+
+ /* make a copy of the envvar so it can be safely modified. */
+ server_urls = strdup(urls_envvar);
+ if (server_urls == NULL)
+ {
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* thereafter, goto out0 on error*/
+
+ /* create target directory in cache if not found. */
+ struct stat st;
+ if (stat(target_cache_dir, &st) == -1 && mkdir(target_cache_dir, 0700) < 0)
+ {
+ rc = -errno;
+ goto out0;
+ }
+
+ /* NB: write to a temporary file first, to avoid race condition of
+ multiple clients checking the cache, while a partially-written or empty
+ file is in there, being written from libcurl. */
+ fd = mkstemp (target_cache_tmppath);
+ if (fd < 0)
+ {
+ rc = -errno;
+ goto out0;
+ }
+
+ /* thereafter, goto out1 on error */
+
+ CURL *session = curl_easy_init();
+ if (session == NULL)
+ {
+ rc = -ENETUNREACH;
+ goto out1;
+ }
+ /* thereafter, goto out2 on error */
+
+ char *strtok_saveptr;
+ char *server_url = strtok_r(server_urls, url_delim, &strtok_saveptr);
+ /* Try the various servers sequentially. XXX: in parallel instead. */
+ while (server_url != NULL)
+ {
+ /* query servers until we find the target or run out of urls to try. */
+ char url[PATH_MAX];
+ if (filename) // starts with /
+ snprintf(url, PATH_MAX, "%s/buildid/%s/%s%s", server_url, build_id, type, filename);
+ else
+ snprintf(url, PATH_MAX, "%s/buildid/%s/%s", server_url, build_id, type);
+
+ curl_easy_reset(session);
+ curl_easy_setopt(session, CURLOPT_URL, url);
+ curl_easy_setopt(session,
+ CURLOPT_WRITEFUNCTION,
+ dbgserver_write_callback);
+ curl_easy_setopt(session, CURLOPT_WRITEDATA, (void*)&fd);
+ curl_easy_setopt(session, CURLOPT_TIMEOUT, (long) server_timeout);
+ curl_easy_setopt(session, CURLOPT_FILETIME, (long) 1);
+ curl_easy_setopt(session, CURLOPT_FOLLOWLOCATION, (long) 1);
+ curl_easy_setopt(session, CURLOPT_AUTOREFERER, (long) 1);
+ curl_easy_setopt(session, CURLOPT_ACCEPT_ENCODING, "");
+ curl_easy_setopt(session, CURLOPT_USERAGENT, (void*) PACKAGE_STRING);
+
+ CURLcode curl_res = curl_easy_perform(session);
+ if (curl_res != CURLE_OK)
+ {
+ server_url = strtok_r(NULL, url_delim,&strtok_saveptr);
+ continue; /* fail over to next server */
+ }
+
+ long resp_code = 500;
+ curl_res = curl_easy_getinfo(session, CURLINFO_RESPONSE_CODE, &resp_code);
+ if ((curl_res != CURLE_OK) || (resp_code != 200))
+ {
+ server_url = strtok_r(NULL, url_delim,&strtok_saveptr);
+ continue;
+ }
+
+ time_t mtime;
+ curl_res = curl_easy_getinfo(session, CURLINFO_FILETIME, (void*) &mtime);
+ if (curl_res != CURLE_OK)
+ mtime = time(NULL); /* fall back to current time */
+
+ /* we've got one!!!! */
+ struct timeval tvs[2];
+ tvs[0].tv_sec = tvs[1].tv_sec = mtime;
+ tvs[0].tv_usec = tvs[1].tv_usec = 0;
+ (void) futimes (fd, tvs); /* best effort */
+
+ /* rename tmp->real */
+ rc = rename (target_cache_tmppath, target_cache_path);
+ if (rc < 0)
+ {
+ rc = -errno;
+ goto out2;
+ /* Perhaps we need not give up right away; could retry or something ... */
+ }
+
+ /* Success!!!! */
+ curl_easy_cleanup(session);
+ free (server_urls);
+ /* don't close fd - we're returning it */
+ /* don't unlink the tmppath; it's already been renamed. */
+ if (path != NULL)
+ *path = strdup(target_cache_path);
+ return fd;
+ }
+
+/* fell through - out of alternative servers */
+ rc = -ENOENT;
+
+/* error exits */
+ out2:
+ curl_easy_cleanup(session);
+
+ out1:
+ unlink (target_cache_tmppath);
+ (void) rmdir (target_cache_dir); /* nop if not empty */
+ close (fd);
+
+ out0:
+ free (server_urls);
+
+ out:
+ return rc;
+}
+
+
+/* See dbgserver-client.h */
+int
+dbgserver_find_debuginfo (const unsigned char *build_id_bytes, int build_id_len,
+ char **path)
+{
+ return dbgserver_query_server(build_id_bytes, build_id_len,
+ "debuginfo", NULL, path);
+}
+
+
+/* See dbgserver-client.h */
+int
+dbgserver_find_executable(const unsigned char *build_id_bytes, int build_id_len,
+ char **path)
+{
+ return dbgserver_query_server(build_id_bytes, build_id_len,
+ "executable", NULL, path);
+}
+
+/* See dbgserver-client.h */
+int dbgserver_find_source(const unsigned char *build_id_bytes,
+ int build_id_len,
+ const char *filename,
+ char **path)
+{
+ return dbgserver_query_server(build_id_bytes, build_id_len,
+ "source", filename, path);
+}
+
+
+
+/* NB: these are thread-unsafe. */
+__attribute__((constructor)) attribute_hidden void libdbgserver_ctor(void)
+{
+ curl_global_init(CURL_GLOBAL_DEFAULT);
+}
+
+/* NB: this is very thread-unsafe: it breaks other threads that are still in libcurl */
+__attribute__((destructor)) attribute_hidden void libdbgserver_dtor(void)
+{
+ /* ... so don't do this: */
+ /* curl_global_cleanup(); */
+}
diff --git a/dbgserver/dbgserver-client.h b/dbgserver/dbgserver-client.h
new file mode 100644
index 00000000..29b97ee1
--- /dev/null
+++ b/dbgserver/dbgserver-client.h
@@ -0,0 +1,27 @@
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Query the urls contained in $DBGSERVER_URLS for a file with
+ the specified type and build id. If build_id_len == 0, the
+ build_id is supplied as a lowercase hexadecimal string; otherwise
+ it is a binary blob of given length.
+
+ If successful, return a file descriptor to the target, otherwise
+ return a negative POSIX error code (-errno). */
+int dbgserver_find_debuginfo (const unsigned char *build_id_bytes,
+ int build_id_len,
+ char **path);
+
+int dbgserver_find_executable (const unsigned char *build_id_bytes,
+ int build_id_len,
+ char **path);
+
+int dbgserver_find_source (const unsigned char *build_id_bytes,
+ int build_id_len,
+ const char *filename,
+ char **path);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/dbgserver/dbgserver-find.1 b/dbgserver/dbgserver-find.1
new file mode 100644
index 00000000..d4e1e81e
--- /dev/null
+++ b/dbgserver/dbgserver-find.1
@@ -0,0 +1,131 @@
+'\"! tbl | nroff \-man
+'\" t macro stdmacro
+
+.de SAMPLE
+.br
+.RS 0
+.nf
+.nh
+..
+.de ESAMPLE
+.hy
+.fi
+.RE
+..
+
+.TH DBGSERVER-FIND 1
+.SH NAME
+dbgserver-find \- request debuginfo-related data
+
+.SH SYNOPSIS
+.B dbgserver-find debuginfo \fIBUILDID\fP
+
+.B dbgserver-find executable \fIBUILDID\fP
+
+.B dbgserver-find source \fIBUILDID\fP \fI/FILENAME\fP
+
+.SH DESCRIPTION
+\fBdbgserver-find\fP queries one or more \fBdbgserver\fP servers for
+debuginfo-related data. In case of a match, it saves the
+requested file into a local cache, prints the file name to standard
+output, and exits with a success status of 0. In case of any error,
+it exits with a failure status and an error message to standard error.
+
+.\" Much of the following text is duplicated with dbgserver.8
+
+The dbgserver system uses buildids to identify debuginfo-related data.
+These are stored as binary notes in ELF/DWARF files, and are
+represented as lowercase hexadecimal. For example, for a program
+/bin/ls, look at the ELF note GNU_BUILD_ID:
+
+.SAMPLE
+% readelf -n /bin/ls | grep -A4 build.id
+Note section [ 4] '.note.gnu.build-id' of 36 bytes at offset 0x340:
+Owner Data size Type
+GNU 20 GNU_BUILD_ID
+Build ID: 8713b9c3fb8a720137a4a08b325905c7aaf8429d
+.ESAMPLE
+
+Then the hexadecimal BUILDID is simply:
+
+.SAMPLE
+8713b9c3fb8a720137a4a08b325905c7aaf8429d
+.ESAMPLE
+
+.SS debuginfo \fIBUILDID\fP
+
+If the given buildid is known to a server, this request will result
+in a binary object that contains the customary \fB.*debug_*\fP
+sections. This may be a split debuginfo file as created by
+\fBstrip\fP, or it may be an original unstripped executable.
+
+.SS executable \fIBUILDID\fP
+
+If the given buildid is known to the server, this request will result
+in a binary object that contains the normal executable segments. This
+may be a executable stripped by \fBstrip\fP, or it may be an original
+unstripped executable. \fBET_DYN\fP shared libraries are considered
+to be a type of executable.
+
+.SS source \fIBUILDID\fP \fI/SOURCE/FILE\fP
+
+If the given buildid is known to the server, this request will result
+in a binary object that contains the source file mentioned. The path
+should be absolute. Relative path names commonly appear in the DWARF
+file's source directory, but these paths are relative to
+individual compilation unit AT_comp_dir paths, and yet an executable
+is made up of multiple CUs. Therefore, to disambiguate, dbgserver
+expects source queries to prefix relative path names with the CU
+compilation-directory.
+
+Note: you should not elide \fB../\fP or \fB/./\fP sorts of relative
+path components in the directory names, because if this is how those
+names appear in the DWARF files, that is what dbgserver needs to see
+too.
+
+For example:
+.TS
+l l.
+#include <stdio.h> source BUILDID /usr/include/stdio.h
+/path/to/foo.c source BUILDID /path/to/foo.c
+\../bar/foo.c AT_comp_dir=/zoo source BUILDID /zoo/../bar/foo.c
+.TE
+
+.SH "SECURITY"
+
+dbgserver-find \fBdoes not\fP include any particular security
+features. It trusts that the binaries returned by the dbgserver(s)
+are accurate. Therefore, the list of servers should include only
+trustworthy ones. If accessed across HTTP rather than HTTPS, the
+network should be trustworthy.
+
+.SH "ENVIRONMENT VARIABLES"
+
+.TP 21
+.B DBGSERVER_URLS
+This environment variable contains a list of URL prefixes for trusted
+dbgserver instances. Alternate URL prefixes are separated by space.
+
+.TP 21
+.B DBGSERVER_TIMEOUT
+This environment variable governs the timeout for each dbgserver HTTP
+connection. A server that fails to respond within this many seconds
+is skipped. The default is 5.
+
+.TP 21
+.B DBGSERVER_CACHE_PATH
+This environment variable governs the location of the cache where
+downloaded files are kept. It is cleaned periodically as this
+program is reexecuted. The default is $HOME/.dbgserver_client_cache.
+.\" XXX describe cache eviction policy
+
+.SH "FILES"
+.LP
+.PD .1v
+.TP 20
+.B $HOME/.dbgserver_client_cache
+Default cache directory.
+.PD
+
+.SH "SEE ALSO"
+.I "dbgserver(8)"
diff --git a/dbgserver/dbgserver-find.c b/dbgserver/dbgserver-find.c
new file mode 100644
index 00000000..caa053d4
--- /dev/null
+++ b/dbgserver/dbgserver-find.c
@@ -0,0 +1,82 @@
+/* Command-line frontend for retrieving ELF / DWARF / source files
+ from the dbgserver.
+ Copyright (C) 2019 Red Hat, Inc.
+ This file is part of elfutils.
+
+ This file is free software; you can redistribute it and/or modify
+ it under the terms of either
+
+ * the GNU Lesser General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at
+ your option) any later version
+
+ or
+
+ * the GNU General Public License as published by the Free
+ Software Foundation; either version 2 of the License, or (at
+ your option) any later version
+
+ or both in parallel, as here.
+
+ elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received copies of the GNU General Public License and
+ the GNU Lesser General Public License along with this program. If
+ not, see <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "dbgserver-client.h"
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+
+int
+main(int argc, char** argv)
+{
+ if (argc < 3 || argc > 4)
+ {
+ fprintf(stderr, "%s (%s) %s\n", argv[0], PACKAGE_NAME, PACKAGE_VERSION);
+ fprintf(stderr, "Usage: %s debuginfo BUILDID\n", argv[0]);
+ fprintf(stderr, " %s executable BUILDID\n", argv[0]);
+ fprintf(stderr, " %s source BUILDID /FILENAME\n", argv[0]);
+ return 1;
+ }
+
+ int rc;
+ char *cache_name;
+
+ /* Check whether FILETYPE is valid and call the appropriate
+ dbgserver_find_* function. If FILETYPE is "source"
+ then ensure a FILENAME was also supplied as an argument. */
+ if (strcmp(argv[1], "debuginfo") == 0)
+ rc = dbgserver_find_debuginfo((unsigned char *)argv[2], 0, &cache_name);
+ else if (strcmp(argv[1], "executable") == 0)
+ rc = dbgserver_find_executable((unsigned char *)argv[2], 0, &cache_name);
+ else if (strcmp(argv[1], "source") == 0)
+ {
+ if (argc != 4 || argv[3][0] != '/')
+ {
+ fprintf(stderr, "If FILETYPE is \"source\" then absolute /FILENAME must be given\n");
+ return 1;
+ }
+ rc = dbgserver_find_source((unsigned char *)argv[2], 0, argv[3], &cache_name);
+ }
+ else
+ {
+ fprintf(stderr, "Invalid filetype\n");
+ return 1;
+ }
+
+ if (rc < 0)
+ {
+ fprintf(stderr, "Server query failed: %s\n", strerror(-rc));
+ return 1;
+ }
+
+ printf("%s\n", cache_name);
+ return 0;
+}
diff --git a/dbgserver/dbgserver.8 b/dbgserver/dbgserver.8
new file mode 100644
index 00000000..a254d6b4
--- /dev/null
+++ b/dbgserver/dbgserver.8
@@ -0,0 +1,205 @@
+'\"! tbl | nroff \-man
+'\" t macro stdmacro
+
+.de SAMPLE
+.br
+.RS 0
+.nf
+.nh
+..
+.de ESAMPLE
+.hy
+.fi
+.RE
+..
+
+.TH DBGSERVER 8
+.SH NAME
+dbgserver \- debuginfo-related http file-server daemon
+
+.SH SYNOPSIS
+.B dbgserver
+[\fIoptions\fP]
+
+.SH DESCRIPTION
+\fBdbgserver\fP serves debuginfo-related artifacts over HTTP. It
+periodically scans a set of directories for ELF/DWARF files and their
+associated source code, as well as RPM files containing the above, to
+build an index by their buildid. This index is used when remote
+clients use the HTTP webapi, to fetch these files by the same
+buildid.
+
+If a dbgserver cannot service a given buildid artifact request itself,
+and it is configured with information about upstream dbgservers, it
+will query them for the same information, just as \fBdbgserver-find\fP
+would. If successful, it locally caches then relays the file content
+to the original requester.
+
+
+.SH OPTIONS
+.TP
+.BR \-F, \-\-source\-files=PATH
+Add a thread to scan for ELF/DWARF/source files under the given
+directory. This option may be repeated to scan multiple paths.
+Source files are matched with DWARF files based on the AT_comp_dir
+(compilation directory) attributes inside it.
+
+.TP
+.BR \-R, \-\-source\-rpms=PATH
+Add a thread to scan for ELF/DWARF/source files contained in RPMs
+under the given directory. This option may be repeated to scan
+multiple paths.
+
+.TP
+.BR \-d, \-\-database=FILE
+Set the path of the SQLITE database used to store the index. This
+file is disposable in the sense that a later rescan will repopulate
+data. It will contain absolute file path names, so it may not be
+portable across machines. It will be frequently read/written, so it
+may not perform well when shared across machines or users, due
+to SQLITE locking performance. The default database file is
+$HOME/.dbgserver.sqlite.
+
+.TP
+.BR \-p, \-\-port=NUM
+Set the TCP port number on which dbgserver should listen, to service
+HTTP requests. Both IPv4 and IPv6 sockets are opened, if possible.
+The webapi is documented below. The default port number is 8002.
+
+.TP
+.BR \-t, \-\-rescan\-time=SECONDS
+Set the rescan time for the file and RPM directories. This is the
+amount of time the scanning threads will wait after finishing a scan,
+before doing it again. A rescan for unchanged files is fast (because
+the index also stores the file mtimes). A time of zero is acceptable,
+and means that only one initial scan should be performed. The default
+rescan time is 300 seconds.
+
+.TP
+.BR \-v
+Increase verbosity of logging to the standard error file descriptor.
+May be repeated to increase details. The default verbosity is 0.
+
+.SH WEBAPI
+
+.\" Much of the following text is duplicated with dbgserver-find.1
+
+The dbgserver's webapi resembles ordinary file service, where a GET
+request with a path containing a known buildid results in a file.
+Unknown buildid / request combinations result in HTTP error codes.
+This file service resemblance is intentional, so that an installation
+can take advantage of standard HTTP management infrastructure.
+
+There are three requests. In each case, the buildid is encoded as a
+lowercase hexadecimal string. For example, for a program /bin/ls,
+look at the ELF note GNU_BUILD_ID:
+
+.SAMPLE
+% readelf -n /bin/ls | grep -A4 build.id
+Note section [ 4] '.note.gnu.build-id' of 36 bytes at offset 0x340:
+Owner Data size Type
+GNU 20 GNU_BUILD_ID
+Build ID: 8713b9c3fb8a720137a4a08b325905c7aaf8429d
+.ESAMPLE
+
+Then the hexadecimal BUILDID is simply:
+
+.SAMPLE
+8713b9c3fb8a720137a4a08b325905c7aaf8429d
+.ESAMPLE
+
+.SS /buildid/\fIBUILDID\fP/debuginfo
+
+If the given buildid is known to the server, this request will result
+in a binary object that contains the customary \fB.*debug_*\fP
+sections. This may be a split debuginfo file as created by
+\fBstrip\fP, or it may be an original unstripped executable.
+
+.SS /buildid/\fIBUILDID\fP/executable
+
+If the given buildid is known to the server, this request will result
+in a binary object that contains the normal executable segments. This
+may be an executable stripped by \fBstrip\fP, or it may be an original
+unstripped executable. \fBET_DYN\fP shared libraries are considered
+to be a type of executable.
+
+.SS /buildid/\fIBUILDID\fP/source\fI/SOURCE/FILE\fP
+
+If the given buildid is known to the server, this request will result
+in a binary object that contains the source file mentioned. The path
+should be absolute. Relative path names commonly appear in the DWARF
+file's source directory, but these paths are relative to
+individual compilation unit AT_comp_dir paths, and yet an executable
+is made up of multiple CUs. Therefore, to disambiguate, dbgserver
+expects source queries to prefix relative path names with the CU
+compilation-directory.
+
+Note: contrary to RFC 3986, the client should not elide \fB../\fP or
+\fB/./\fP sorts of relative path components in the directory names,
+because if this is how those names appear in the DWARF files, that
+is what dbgserver needs to see too.
+
+For example:
+.TS
+l l.
+#include <stdio.h> /buildid/BUILDID/source/usr/include/stdio.h
+/path/to/foo.c /buildid/BUILDID/source/path/to/foo.c
+\../bar/foo.c AT_comp_dir=/zoo /buildid/BUILDID/source/zoo/../bar/foo.c
+.TE
+
+.SH SECURITY
+
+dbgserver \fBdoes not\fP include any particular security features.
+While it is robust with respect to inputs, some abuse is possible. It
+forks a new thread for each incoming HTTP request, which could lead to
+a denial-of-service in terms of RAM, CPU, disk I/O, or network I/O.
+If this is a problem, users are advised to install dbgserver with a
+HTTPS reverse-proxy front-end that enforces site policies for
+firewalling, authentication, integrity, authorization, and load
+control.
+
+When relaying queries to upstream dbgservers, dbgserver \fBdoes not\fP
+include any particular security features. It trusts that the binaries
+returned by the dbgservers are accurate. Therefore, the list of
+servers should include only trustworthy ones. If accessed across HTTP
+rather than HTTPS, the network should be trustworthy.
+
+
+.SH "ENVIRONMENT VARIABLES"
+
+.TP 21
+.B DBGSERVER_URLS
+This environment variable contains a list of URL prefixes for trusted
+dbgserver instances. Alternate URL prefixes are separated by space.
+Avoid referential loops that cause a server to contact itself, directly
+or indirectly - the results would be hilarious.
+
+.TP 21
+.B DBGSERVER_TIMEOUT
+This environment variable governs the timeout for each dbgserver HTTP
+connection. A server that fails to respond within this many seconds
+is skipped. The default is 5.
+
+.TP 21
+.B DBGSERVER_CACHE_PATH
+This environment variable governs the location of the cache where
+downloaded files are kept. It is cleaned periodically as this
+program is reexecuted. The default is $HOME/.dbgserver_client_cache.
+.\" XXX describe cache eviction policy
+
+.SH FILES
+.LP
+.PD .1v
+.TP 20
+.B $HOME/.dbgserver.sqlite
+Default database file.
+.PD
+
+.TP 20
+.B $HOME/.dbgserver_client_cache
+Default cache directory for content from upstream dbgservers.
+.PD
+
+
+.SH "SEE ALSO"
+.I "dbgserver-find(1)"
diff --git a/dbgserver/dbgserver.cxx b/dbgserver/dbgserver.cxx
new file mode 100644
index 00000000..4b852aee
--- /dev/null
+++ b/dbgserver/dbgserver.cxx
@@ -0,0 +1,1959 @@
+/* Debuginfo file server.
+ Copyright (C) 2019 Red Hat, Inc.
+ This file is part of elfutils.
+
+ This file is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+
+/* cargo-cult from libdwfl linux-kernel-modules.c */
+/* In case we have a bad fts we include this before config.h because it
+ can't handle _FILE_OFFSET_BITS.
+ Everything we need here is fine if its declarations just come first.
+ Also, include sys/types.h before fts. On some systems fts.h is not self
+ contained. */
+#ifdef BAD_FTS
+ #include <sys/types.h>
+ #include <fts.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+ #include "config.h"
+#endif
+
+#include "printversion.h"
+#include "dbgserver-client.h"
+#include <dwarf.h>
+
+#include <argp.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <error.h>
+#include <libintl.h>
+#include <locale.h>
+#include <regex.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <netdb.h>
+
+
+/* If fts.h is included before config.h, its indirect inclusions may not
+ give us the right LFS aliases of these functions, so map them manually. */
+#ifdef BAD_FTS
+ #ifdef _FILE_OFFSET_BITS
+ #define open open64
+ #define fopen fopen64
+ #endif
+#else
+ #include <sys/types.h>
+ #include <fts.h>
+#endif
+
+#include <cstring>
+#include <vector>
+#include <string>
+#include <iostream>
+#include <ostream>
+#include <sstream>
+// #include <algorithm>
+using namespace std;
+
+#include <gelf.h>
+#include <libdwelf.h>
+
+#include <microhttpd.h>
+#include <curl/curl.h>
+#include <archive.h>
+#include <archive_entry.h>
+#include <sqlite3.h>
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#ifndef _
+# define _(str) gettext (str)
+#endif
+
+
+// Roll this identifier for every sqlite schema incompatibility
+// XXX: garbage collect and/or migrate from previous-version tables
+#define BUILDIDS "buildids3"
+
+static const char DBGSERVER_SQLITE_DDL[] =
+ "pragma foreign_keys = on;\n"
+ "pragma synchronous = 0;\n" // disable fsync()s - this cache is disposable across a machine crash
+ "pragma journal_mode = wal;\n" // https://sqlite.org/wal.html
+
+ /* Normalized tables to represent general buildid-to-file/subfile mapping. */
+ "create table if not exists " BUILDIDS "_files (\n"
+ " id integer primary key not null,\n"
+ " name text unique not null);\n"
+ "create table if not exists " BUILDIDS "_buildids (\n"
+ " id integer primary key not null,\n"
+ " hex text unique not null);\n"
+ "create table if not exists " BUILDIDS "_norm (\n"
+ " buildid integer,\n"
+ " artifacttype text\n" // -- D(ebug) E(xecutable) S(source)
+ " check (artifacttype IS NULL or artifacttype IN ('D', 'E', 'S')),\n"
+ " artifactsrc integer\n" // DWARF /path/to/source
+ " check (artifacttype NOT IN ('S') OR artifactsrc is not null),\n"
+ " mtime integer,\n" // -- epoch timestamp when we last found this source0
+ " sourcetype text(1) not null\n"
+ " check (sourcetype IN ('F', 'R')),\n" // -- as per --source-TYPE single-char code\n"
+ " source0 integer not null,\n"
+ " source1 integer,\n"
+ " foreign key (artifactsrc) references " BUILDIDS "_files(id) on update cascade on delete cascade,\n"
+ " foreign key (source0) references " BUILDIDS "_files(id) on update cascade on delete cascade,\n"
+ " foreign key (buildid) references " BUILDIDS "_buildids(id) on update cascade on delete cascade,\n"
+ " foreign key (source1) references " BUILDIDS "_files(id) on update cascade on delete cascade,\n"
+ " unique (buildid, artifacttype, artifactsrc, sourcetype, source0) on conflict replace);\n"
+ /* and now for a FULL OUTER JOIN emulation */
+ "create view if not exists " BUILDIDS " as select\n"
+ " b.hex as buildid, n.artifacttype, f3.name as artifactsrc, n.mtime, n.sourcetype, f1.name as source0, f2.name as source1\n"
+ " from " BUILDIDS "_buildids b, " BUILDIDS "_norm n, " BUILDIDS "_files f1, " BUILDIDS "_files f2, " BUILDIDS "_files f3\n"
+ " where b.id = n.buildid and f1.id = n.source0 and f2.id = n.source1 and f3.id = n.artifactsrc\n"
+ "union all select\n"
+ " b.hex as buildid, n.artifacttype, null, n.mtime, n.sourcetype, f1.name as source0, f2.name as source1\n"
+ " from " BUILDIDS "_buildids b, " BUILDIDS "_norm n, " BUILDIDS "_files f1, " BUILDIDS "_files f2\n"
+ " where b.id = n.buildid and f1.id = n.source0 and f2.id = n.source1 and n.artifactsrc is null\n"
+ "union all select\n"
+ " b.hex as buildid, n.artifacttype, f3.name, n.mtime, n.sourcetype, f1.name as source0, null\n"
+ " from " BUILDIDS "_buildids b, " BUILDIDS "_norm n, " BUILDIDS "_files f1, " BUILDIDS "_files f3\n"
+ " where b.id = n.buildid and f1.id = n.source0 and n.source1 is null and f3.id = n.artifactsrc\n"
+ "union all select\n"
+ " b.hex as buildid, n.artifacttype, null, n.mtime, n.sourcetype, f1.name as source0, null\n"
+ " from " BUILDIDS "_buildids b, " BUILDIDS "_norm n, " BUILDIDS "_files f1\n"
+ " where b.id = n.buildid and f1.id = n.source0 and n.source1 is null and n.artifactsrc is null\n"
+ "union all select\n" // negative hit
+ " null, null, null, n.mtime, n.sourcetype, f1.name as source0, null\n"
+ " from " BUILDIDS "_norm n, " BUILDIDS "_files f1\n"
+ " where n.buildid is null and f1.id = n.source0;\n"
+
+ "create index if not exists " BUILDIDS "_idx1 on " BUILDIDS "_norm (buildid, artifacttype);\n"
+ "create index if not exists " BUILDIDS "_idx2 on " BUILDIDS "_norm (mtime, sourcetype, source0);\n"
+
+ /* BUILDIDS semantics:
+
+ buildid atype/asrc mtime stype source0 source1
+ $BUILDID D/E $TIME F $FILE -- normal hit: executable or debuinfo file
+ $BUILDID S $SRC $TIME F $FILE -- normal hit: source file (FILE actual location, SRC dwarf)
+ $BUILDID D/E $TIME R $RPM $FILE -- normal hit: executable or debuinfo file in rpm RPM file FILE
+ $BUILDID S $SRC $TIME R $RPM $FILE -- normal hit: source file (RPM rpm, FILE content, SRC dwarf)
+ $TIME F/R $FILE -- negative hit: bad file known to be unrescanworthy at $TIME
+ \-----------/ \----------/ UNIQUE
+ */
+
+ /* Denormalized table for source be-on-the-lookup mappings. Denormalized because it's a temporary table:
+ in steady state it's empty. */
+ "create table if not exists " BUILDIDS "_bolo (\n"
+ " buildid integer not null,\n"
+ " srcname text not null,\n"
+ " sourcetype text(1) not null\n"
+ " check (sourcetype IN ('F', 'R')),\n" // -- as per --source-TYPE single-char code\n"
+ " dirname text not null,\n"
+ " unique (buildid, srcname, sourcetype, dirname) on conflict ignore);\n"
+
+ "create index if not exists " BUILDIDS "_bolo_idx1 on " BUILDIDS "_bolo (sourcetype, dirname);\n"
+ /*
+ BUILDIDS_bolo semantics:
+
+ $BUILDID $SRC F $DIR -- source BOLO: recently looking for dwarf SRC mentioned under fts-$DIR
+ $BUILDID $SRC R $DIR -- source BOLO: recently looking for dwarf SRC mentioned under fts-$DIR
+ */
+;
+
+
+// schema change history
+//
+// buildid2*: normalize buildid and filenames into interning tables; split out srcfile BOLO
+//
+// buildid1: make buildid and artifacttype NULLable, to represent cached-negative
+// lookups from sources, e.g. files or rpms that contain no buildid-indexable content
+//
+// buildid: original
+
+
+
+/*
+ ISSUES:
+ - delegated server: recursion/loop; Via: header processing
+ https://blog.cloudflare.com/preventing-malicious-request-loops/
+ - access control ===>> delegate to reverse proxy
+ - running real server for rhel/rhsm probably unnecessary
+ (use subscription-delegation)
+ - need a thread to garbage-collect old buildid_norm / _buildid / _files entries?
+ - inotify based file scanning
+
+ see also:
+ https://github.com/NixOS/nixos-channel-scripts/blob/master/index-debuginfo.cc
+ https://github.com/edolstra/dwarffs
+*/
+
+
+/* Name and version of program. */
+/* ARGP_PROGRAM_VERSION_HOOK_DEF = print_version; */
+
+/* Bug report address. */
+ARGP_PROGRAM_BUG_ADDRESS_DEF = PACKAGE_BUGREPORT;
+
+/* Definitions of arguments for argp functions. */
+static const struct argp_option options[] =
+ {
+ { NULL, 0, NULL, 0, N_("Sources:"), 1 },
+ { "source-files", 'F', "PATH", 0, N_("Scan ELF/DWARF files under given directory."), 0 },
+ { "source-rpms", 'R', "PATH", 0, N_("Scan RPM files under given directory."), 0 },
+ // { "source-rpms-yum", 0, "SECONDS", 0, N_("Try fetching missing RPMs from yum."), 0 },
+ // "source-rpms-koji" ... no can do, not buildid-addressable
+ // http traversal for rpm downloading?
+ // "source-oci-imageregistry" ...
+
+ { NULL, 0, NULL, 0, N_("Options:"), 2 },
+ { "rescan-time", 't', "SECONDS", 0, N_("Number of seconds to wait between rescans."), 0 },
+ { "port", 'p', "NUM", 0, N_("HTTP port to listen on."), 0 },
+ { "database", 'd', "FILE", 0, N_("Path to sqlite database."), 0 },
+ { "verbose", 'v', NULL, 0, N_("Increase verbosity."), 0 },
+
+ { NULL, 0, NULL, 0, NULL, 0 }
+ };
+
+/* Short description of program. */
+static const char doc[] = N_("Serve debuginfo-related content across HTTP.");
+
+/* Strings for arguments in help texts. */
+static const char args_doc[] = N_("[--source-TYPE...]");
+
+/* Prototype for option handler. */
+static error_t parse_opt (int key, char *arg, struct argp_state *state);
+
+/* Data structure to communicate with argp functions. */
+static struct argp argp =
+ {
+ options, parse_opt, args_doc, doc, NULL, NULL, NULL
+ };
+
+
+static string db_path;
+static sqlite3 *db;
+static unsigned verbose;
+static volatile sig_atomic_t interrupted = 0;
+static unsigned http_port;
+static unsigned rescan_s = 300;
+static vector<string> source_file_paths;
+static vector<pthread_t> source_file_scanner_threads;
+static vector<string> source_rpm_paths;
+static vector<pthread_t> source_rpm_scanner_threads;
+
+
+
+/* Handle program arguments. */
+static error_t
+parse_opt (int key, char *arg,
+ struct argp_state *state __attribute__ ((unused)))
+{
+ switch (key)
+ {
+ case 'v': verbose ++; break;
+ case 'd': db_path = string(arg); break;
+ case 'p': http_port = atoi(arg); break;
+ case 'F': source_file_paths.push_back(string(arg)); break;
+ case 'R': source_rpm_paths.push_back(string(arg)); break;
+ case 't': rescan_s = atoi(arg); break;
+ // case 'h': argp_state_help (state, stderr, ARGP_HELP_LONG|ARGP_HELP_EXIT_OK);
+ default: return ARGP_ERR_UNKNOWN;
+ }
+
+ return 0;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+// represent errors that may get reported to an ostream and/or a libmicrohttpd connection
+
+struct reportable_exception
+{
+ int code;
+ string message;
+
+ reportable_exception(int code, const string& message): code(code), message(message) {}
+ reportable_exception(const string& message): code(503), message(message) {}
+ reportable_exception(): code(503), message() {}
+
+ void report(ostream& o) const; // defined under obatched() class below
+
+ int mhd_send_response(MHD_Connection* c) const {
+ MHD_Response* r = MHD_create_response_from_buffer (message.size(),
+ (void*) message.c_str(),
+ MHD_RESPMEM_MUST_COPY);
+ int rc = MHD_queue_response (c, code, r);
+ MHD_destroy_response (r);
+ return rc;
+ }
+};
+
+
+struct sqlite_exception: public reportable_exception
+{
+ sqlite_exception(int rc, const string& msg):
+ reportable_exception(string("sqlite3 error: ") + msg + ": " + string(sqlite3_errstr(rc) ?: "?")) {}
+};
+
+struct libc_exception: public reportable_exception
+{
+ libc_exception(int rc, const string& msg):
+ reportable_exception(string("libc error: ") + msg + ": " + string(strerror(rc) ?: "?")) {}
+};
+
+
+struct archive_exception: public reportable_exception
+{
+ archive_exception(const string& msg):
+ reportable_exception(string("libarchive error: ") + msg) {}
+ archive_exception(struct archive* a, const string& msg):
+ reportable_exception(string("libarchive error: ") + msg + ": " + string(archive_error_string(a) ?: "?")) {}
+};
+
+
+struct elfutils_exception: public reportable_exception
+{
+ elfutils_exception(int rc, const string& msg):
+ reportable_exception(string("elfutils error: ") + msg + ": " + string(elf_errmsg(rc) ?: "?")) {}
+};
+
+
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+// RAII style sqlite prepared-statement holder that matches { } block lifetime
+
+struct sqlite_ps
+{
+private:
+ sqlite3_stmt *pp;
+
+ sqlite_ps(const sqlite_ps&); // make uncopyable
+ sqlite_ps& operator=(const sqlite_ps &); // make unassignable
+
+public:
+ sqlite_ps (sqlite3* db, const string& sql) {
+ int rc = sqlite3_prepare_v2 (db, sql.c_str(), -1 /* to \0 */, & this->pp, NULL);
+ if (rc != SQLITE_OK)
+ throw sqlite_exception(rc, "prepare " + sql);
+ }
+ ~sqlite_ps () { sqlite3_finalize (this->pp); }
+ operator sqlite3_stmt* () { return this->pp; }
+};
+
+
+////////////////////////////////////////////////////////////////////////
+
+// RAII style templated autocloser
+
+template <class Payload, class Ignore>
+struct defer_dtor
+{
+public:
+ typedef Ignore (*dtor_fn) (Payload);
+
+private:
+ Payload p;
+ dtor_fn fn;
+
+public:
+ defer_dtor(Payload _p, dtor_fn _fn): p(_p), fn(_fn) {}
+ ~defer_dtor() { (void) (*fn)(p); }
+
+private:
+ defer_dtor(const defer_dtor<Payload,Ignore>&); // make uncopyable
+ defer_dtor& operator=(const defer_dtor<Payload,Ignore> &); // make unassignable
+};
+
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+// Lightweight wrapper for pthread_mutex_t
+struct my_lock_t
+{
+private:
+ pthread_mutex_t _lock;
+public:
+ my_lock_t() { pthread_mutex_init(& this->_lock, NULL); }
+ ~my_lock_t() { pthread_mutex_destroy (& this->_lock); }
+ void lock() { pthread_mutex_lock (& this->_lock); }
+ void unlock() { pthread_mutex_unlock (& this->_lock); }
+private:
+ my_lock_t(const my_lock_t&); // make uncopyable
+ my_lock_t& operator=(my_lock_t const&); // make unassignable
+};
+
+
+// RAII style mutex holder that matches { } block lifetime
+struct locker
+{
+public:
+ locker(my_lock_t *_m): m(_m) { m->lock(); }
+ ~locker() { m->unlock(); }
+private:
+ my_lock_t* m;
+};
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+// Print a standard timestamp.
+static ostream&
+timestamp (ostream &o)
+{
+ time_t now;
+ time (&now);
+ char *now2 = ctime (&now);
+ if (now2) {
+ now2[19] = '\0'; // overwrite \n
+ }
+
+ return o << "[" << (now2 ? now2 : "") << "] "
+ << "(" << getpid ()
+#ifdef __linux__
+ << "/" << syscall(SYS_gettid)
+#else
+ << "/" << pthread_self()
+#endif
+ << "): ";
+ // XXX: tid() too
+}
+
+
+// A little class that impersonates an ostream to the extent that it can
+// take << streaming operations. It batches up the bits into an internal
+// stringstream until it is destroyed; then flushes to the original ostream.
+// It adds a timestamp
+class obatched
+{
+private:
+ ostream& o;
+ stringstream stro;
+ static my_lock_t lock;
+public:
+ obatched(ostream& oo, bool timestamp_p = true): o(oo)
+ {
+ if (timestamp_p)
+ timestamp(stro);
+ }
+ ~obatched()
+ {
+ locker do_not_cross_the_streams(& obatched::lock);
+ o << stro.str();
+ o.flush();
+ }
+ operator ostream& () { return stro; }
+ template <typename T> ostream& operator << (const T& t) { stro << t; return stro; }
+};
+my_lock_t obatched::lock; // just the one, since cout/cerr iostreams are not thread-safe
+
+
+void reportable_exception::report(ostream& o) const {
+ obatched(o) << message << endl;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+
+static string
+conninfo (struct MHD_Connection * conn)
+{
+ char hostname[128];
+ char servname[128];
+ int sts = -1;
+
+ if (conn == 0)
+ return "internal";
+
+ /* Look up client address data. */
+ const union MHD_ConnectionInfo *u = MHD_get_connection_info (conn,
+ MHD_CONNECTION_INFO_CLIENT_ADDRESS);
+ struct sockaddr *so = u ? u->client_addr : 0;
+
+ if (so && so->sa_family == AF_INET) {
+ sts = getnameinfo (so, sizeof (struct sockaddr_in), hostname, sizeof (hostname), servname,
+ sizeof (servname), NI_NUMERICHOST | NI_NUMERICSERV);
+ } else if (so && so->sa_family == AF_INET6) {
+ sts = getnameinfo (so, sizeof (struct sockaddr_in6), hostname, sizeof (hostname),
+ servname, sizeof (servname), NI_NUMERICHOST | NI_NUMERICSERV);
+ }
+ if (sts != 0) {
+ hostname[0] = servname[0] = '\0';
+ }
+
+ return string(hostname) + string(":") + string(servname);
+}
+
+
+
+////////////////////////////////////////////////////////////////////////
+
+static void
+add_mhd_last_modified (struct MHD_Response *resp, time_t mtime)
+{
+ struct tm *now = gmtime (&mtime);
+ if (now != NULL)
+ {
+ char datebuf[80];
+ size_t rc = strftime (datebuf, sizeof (datebuf), "%a, %d %b %Y %T GMT", now);
+ if (rc > 0 && rc < sizeof (datebuf))
+ (void) MHD_add_response_header (resp, "Last-Modified", datebuf);
+ }
+
+ (void) MHD_add_response_header (resp, "Cache-Control", "public");
+}
+
+
+
+static struct MHD_Response*
+handle_buildid_f_match (int64_t b_mtime,
+ const string& b_source0)
+{
+ int fd = open(b_source0.c_str(), O_RDONLY);
+ if (fd < 0)
+ {
+ if (verbose > 2)
+ obatched(clog) << "cannot open " << b_source0 << endl;
+ // XXX: delete the buildid record?
+ // NB: it is safe to delete while a select loop is under way
+ return 0;
+ }
+
+ // NB: use manual close(2) in error case instead of defer_dtor, because
+ // in the normal case, we want to hand the fd over to libmicrohttpd for
+ // file transfer.
+
+ struct stat s;
+ int rc = fstat(fd, &s);
+ if (rc < 0)
+ {
+ if (verbose > 2)
+ clog << "cannot fstat " << b_source0 << endl;
+ close(fd);
+ // XXX: delete the buildid record?
+ // NB: it is safe to delete while a select loop is under way
+ return 0;
+ }
+
+ if ((int64_t) s.st_mtime != b_mtime)
+ {
+ if (verbose > 2)
+ obatched(clog) << "mtime mismatch for " << b_source0 << endl;
+ close(fd);
+ return 0;
+ }
+
+ struct MHD_Response* r = MHD_create_response_from_fd ((uint64_t) s.st_size, fd);
+ if (r == 0)
+ {
+ if (verbose > 2)
+ clog << "cannot create fd-response for " << b_source0 << endl;
+ close(fd);
+ }
+ else
+ {
+ add_mhd_last_modified (r, s.st_mtime);
+ if (verbose)
+ obatched(clog) << "serving file " << b_source0 << endl;
+ /* libmicrohttpd will close it. */
+ }
+
+ return r;
+}
+
+
+
+static struct MHD_Response*
+handle_buildid_r_match (int64_t b_mtime,
+ const string& b_source0,
+ const string& b_source1)
+{
+ string popen_cmd = string("/usr/bin/rpm2cpio " + /* XXX sh-meta-escape */ b_source0);
+ FILE* fp = popen (popen_cmd.c_str(), "r"); // "e" O_CLOEXEC?
+ if (fp == NULL)
+ throw libc_exception (errno, string("popen ") + popen_cmd);
+ defer_dtor<FILE*,int> fp_closer (fp, pclose);
+
+ struct archive *a;
+ a = archive_read_new();
+ if (a == NULL)
+ throw archive_exception("cannot create archive reader");
+ defer_dtor<struct archive*,int> archive_closer (a, archive_read_free);
+
+ int rc = archive_read_support_format_cpio(a);
+ if (rc != ARCHIVE_OK)
+ throw archive_exception(a, "cannot select cpio format");
+ rc = archive_read_support_filter_all(a); // XXX: or _none()? are these cpio's compressed at this point?
+ if (rc != ARCHIVE_OK)
+ throw archive_exception(a, "cannot select all filters");
+
+ rc = archive_read_open_FILE (a, fp);
+ if (rc != ARCHIVE_OK)
+ throw archive_exception(a, "cannot open archive from rpm2cpio pipe");
+
+ while(1) // parse cpio archive entries
+ {
+ struct archive_entry *e;
+ rc = archive_read_next_header (a, &e);
+ if (rc != ARCHIVE_OK)
+ break;
+
+ if (! S_ISREG(archive_entry_mode (e))) // skip non-files completely
+ continue;
+
+ string fn = archive_entry_pathname (e);
+ if (fn != b_source1)
+ continue;
+
+ // extract this file to a temporary file
+ char tmppath[PATH_MAX] = "/tmp/dbgserver.XXXXXX"; // XXX: $TMP_DIR etc.
+ int fd = mkstemp (tmppath);
+ if (fd < 0)
+ throw libc_exception (errno, "cannot create temporary file");
+ unlink (tmppath); // unlink now so OS will release the file as soon as we close the fd
+
+ rc = archive_read_data_into_fd (a, fd);
+ if (rc != ARCHIVE_OK)
+ {
+ close (fd);
+ throw archive_exception(a, "cannot extract file");
+ }
+
+ struct MHD_Response* r = MHD_create_response_from_fd (archive_entry_size(e), fd);
+ if (r == 0)
+ {
+ if (verbose > 2)
+ clog << "cannot create fd-response for " << b_source0 << endl;
+ close(fd);
+ }
+ else
+ {
+ add_mhd_last_modified (r, archive_entry_mtime(e));
+ if (verbose)
+ obatched(clog) << "serving rpm " << b_source0 << " file " << b_source1 << endl;
+ /* libmicrohttpd will close it. */
+ return r;
+ }
+ }
+
+ // XXX: rpm/file not found: drop this R entry?
+ return 0;
+}
+
+
+static struct MHD_Response*
+handle_buildid_match (int64_t b_mtime,
+ const string& b_stype,
+ const string& b_source0,
+ const string& b_source1)
+{
+ if (b_stype == "F")
+ return handle_buildid_f_match(b_mtime, b_source0);
+ else if (b_stype == "R")
+ return handle_buildid_r_match(b_mtime, b_source0, b_source1);
+ else
+ return 0;
+}
+
+
+
+// Handle a /buildid/BUILDID/ARTIFACTTYPE[/SUFFIX] webapi request.
+// All three string parameters come straight off the URL (hence the
+// /* unsafe */ markers) and are validated here before being bound into
+// any query.  On success returns an MHD_Response for the caller to
+// queue; otherwise throws a reportable_exception (notably a 404 when
+// nothing matches locally or upstream).
+static struct MHD_Response* handle_buildid (struct MHD_Connection *connection,
+                                            const string& buildid /* unsafe */,
+                                            const string& artifacttype /* unsafe */,
+                                            const string& suffix /* unsafe */)
+{
+  // validate artifacttype
+  string atype_code;
+  if (artifacttype == "debuginfo") atype_code = "D";
+  else if (artifacttype == "executable") atype_code = "E";
+  else if (artifacttype == "source") atype_code = "S";
+  else throw reportable_exception("invalid artifacttype");
+
+  if (atype_code == "S" && suffix == "")
+     throw reportable_exception("invalid source suffix");
+
+  // validate buildid
+  if ((buildid.size() < 2) || // not empty
+      (buildid.size() % 2) || // even number
+      (buildid.find_first_not_of("0123456789abcdef") != string::npos)) // pure tasty lowercase hex
+    throw reportable_exception("invalid buildid");
+
+  if (verbose)
+    obatched(clog) << "searching for buildid=" << buildid << " artifacttype=" << artifacttype
+                   << " suffix=" << suffix << endl;
+
+  // Source ('S') requests additionally match on artifactsrc (the
+  // dwarf-recorded source file name carried in the URL suffix);
+  // D/E rows carry artifactsrc NULL.  Newest mtime wins first try.
+  sqlite_ps pp (db,
+                (atype_code == "S")
+                ? ("select mtime, sourcetype, source0, source1 " // NB: 4 columns
+                   "from " BUILDIDS " where buildid = ? and artifacttype = ? and artifactsrc = ?"
+                   " order by mtime desc;")
+                : ("select mtime, sourcetype, source0, source1 " // NB: 4 columns
+                   "from " BUILDIDS " where buildid = ? and artifacttype = ? and artifactsrc is null"
+                   " order by mtime desc;"));
+
+  int rc = sqlite3_bind_text (pp, 1, buildid.c_str(), -1 /* to \0 */, SQLITE_TRANSIENT);
+  if (rc != SQLITE_OK)
+    throw sqlite_exception(rc, "bind 1");
+  rc = sqlite3_bind_text (pp, 2, atype_code.c_str(), -1 /* to \0 */, SQLITE_TRANSIENT);
+  if (rc != SQLITE_OK)
+    throw sqlite_exception(rc, "bind 2");
+  if (atype_code == "S") // source
+    rc = sqlite3_bind_text (pp, 3, suffix.c_str(), -1 /* to \0 */, SQLITE_TRANSIENT);
+  if (rc != SQLITE_OK)
+    throw sqlite_exception(rc, "bind 3");
+
+  // consume all the rows
+  while (1)
+    {
+      rc = sqlite3_step (pp);
+      if (rc == SQLITE_DONE) break;
+      if (rc != SQLITE_ROW)
+        throw sqlite_exception(rc, "step");
+
+      int64_t b_mtime = sqlite3_column_int64 (pp, 0);
+      string b_stype = string((const char*) sqlite3_column_text (pp, 1) ?: ""); /* by DDL may not be NULL */
+      string b_source0 = string((const char*) sqlite3_column_text (pp, 2) ?: ""); /* may be NULL */
+      string b_source1 = string((const char*) sqlite3_column_text (pp, 3) ?: ""); /* may be NULL */
+
+      if (verbose > 1)
+        obatched(clog) << "found mtime=" << b_mtime << " stype=" << b_stype
+                       << " source0=" << b_source0 << " source1=" << b_source1 << endl;
+
+      // Try accessing the located match.
+      // XXX: validate the mtime against that in the column
+      // XXX: in case of multiple matches, attempt them in parallel?
+      auto r = handle_buildid_match (b_mtime, b_stype, b_source0, b_source1);
+      if (r)
+        return r;
+    }
+
+  // We couldn't find it in the database.  Last ditch effort
+  // is to defer to other debuginfo servers.
+  // NOTE(review): passing 0 as the build_id length appears to mean
+  // "buildid is a \0-terminated hex string" — confirm against the
+  // dbgserver-client API; presumably the client library consults the
+  // configured upstream URL list (federation).
+  int fd = -1;
+  if (artifacttype == "debuginfo")
+    fd = dbgserver_find_debuginfo ((const unsigned char*) buildid.c_str(), 0,
+                                   NULL);
+  else if (artifacttype == "executable")
+    fd = dbgserver_find_executable ((const unsigned char*) buildid.c_str(), 0,
+                                    NULL);
+  else if (artifacttype == "source")
+    fd = dbgserver_find_source ((const unsigned char*) buildid.c_str(), 0,
+                                suffix.c_str(), NULL);
+  // XXX: report bad fd
+  if (fd >= 0)
+    {
+      struct stat s;
+      rc = fstat (fd, &s);
+      if (rc == 0)
+        {
+          auto r = MHD_create_response_from_fd ((uint64_t) s.st_size, fd);
+          if (r)
+            {
+              add_mhd_last_modified (r, s.st_mtime);
+              if (verbose)
+                obatched(clog) << "serving file from upstream dbgserver/cache" << endl;
+              return r; // NB: don't close fd; libmicrohttpd will
+            }
+        }
+      close (fd);
+    }
+
+  throw reportable_exception(MHD_HTTP_NOT_FOUND, "not found");
+}
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+// Placeholder for the /metrics webapi endpoint; currently always
+// reports an error to the client.  The connection parameter is unused
+// until this is implemented.
+static struct MHD_Response*
+handle_metrics (struct MHD_Connection *connection)
+{
+  throw reportable_exception("not yet implemented 2");
+}
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+/* libmicrohttpd request callback: decode the URL into one of the
+   supported webapi operations (/buildid/..., /metrics), produce a
+   response, and queue it.  reportable_exceptions (including input
+   validation failures and 404s) are converted into HTTP error
+   responses here.
+   NOTE(review): only reportable_exception is caught — any other
+   exception type escaping the handlers would propagate out of this C
+   callback; confirm all helpers' exceptions derive from it. */
+static int
+handler_cb (void *cls __attribute__ ((unused)),
+            struct MHD_Connection *connection,
+            const char *url,
+            const char *method,
+            const char *version __attribute__ ((unused)),
+            const char *upload_data __attribute__ ((unused)),
+            size_t * upload_data_size __attribute__ ((unused)),
+            void ** con_cls __attribute__ ((unused)))
+{
+  struct MHD_Response *r = NULL;
+  string url_copy = url;
+
+  if (verbose)
+    obatched(clog) << conninfo(connection) << " " << method << " " << url << endl;
+
+  try
+    {
+      if (string(method) != "GET")
+        throw reportable_exception(400, _("we support GET only"));
+
+      /* Start decoding the URL. */
+      size_t slash1 = url_copy.find('/', 1);
+      string url1 = url_copy.substr(0, slash1); // ok even if slash1 not found
+
+      if (slash1 != string::npos && url1 == "/buildid")
+        {
+          // Expected shape: /buildid/HEX/ARTIFACTTYPE[/SUFFIX]
+          size_t slash2 = url_copy.find('/', slash1+1);
+          if (slash2 == string::npos)
+            throw reportable_exception(_("/buildid/ webapi error, need buildid"));
+
+          string buildid = url_copy.substr(slash1+1, slash2-slash1-1);
+
+          size_t slash3 = url_copy.find('/', slash2+1);
+          string artifacttype, suffix;
+          if (slash3 == string::npos)
+            {
+              artifacttype = url_copy.substr(slash2+1);
+              suffix = "";
+            }
+          else
+            {
+              artifacttype = url_copy.substr(slash2+1, slash3-slash2-1);
+              suffix = url_copy.substr(slash3); // include the slash in the suffix
+            }
+
+          r = handle_buildid(connection, buildid, artifacttype, suffix);
+        }
+      else if (url1 == "/metrics")
+        r = handle_metrics(connection);
+      else
+        throw reportable_exception(_("webapi error, unrecognized /operation"));
+
+      if (r == 0)
+        throw reportable_exception(_("internal error, missing response"));
+
+      // NB: MHD_destroy_response only drops our reference; libmicrohttpd
+      // keeps the response alive until it has been sent.
+      int rc = MHD_queue_response (connection, MHD_HTTP_OK, r);
+      MHD_destroy_response (r);
+      return rc;
+    }
+  catch (const reportable_exception& e)
+    {
+      e.report(clog);
+      return e.mhd_send_response (connection);
+    }
+}
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+// borrowed from src/nm.c get_local_names()
+
+// Walk the compilation units of the given ELF/DWARF file and collect
+// the comp_dir-resolved source file names into debug_sourcefiles.
+// The ehdr/scn/shdr parameters are accepted for signature parity with
+// the call site in elf_classify but are not (yet) consulted, so mark
+// them unused (matches the file's convention, cf. handler_cb) to keep
+// -Wunused-parameter/-Werror builds clean.
+// Adapted from src/nm.c get_local_names().
+static void
+dwarf_extract_source_paths (Elf *elf, GElf_Ehdr* ehdr __attribute__ ((unused)),
+                            Elf_Scn* scn __attribute__ ((unused)),
+                            GElf_Shdr* shdr __attribute__ ((unused)),
+                            vector<string>& debug_sourcefiles)
+{
+  // Open a DWARF handle on top of the already-open ELF handle; failure
+  // (e.g. no usable debug sections) is quietly tolerated.
+  Dwarf* dbg = dwarf_begin_elf (elf, DWARF_C_READ, NULL);
+  if (dbg == NULL)
+    return;
+
+  Dwarf_Off offset = 0;
+  Dwarf_Off old_offset;
+  size_t hsize;
+
+  while (dwarf_nextcu (dbg, old_offset = offset, &offset, &hsize, NULL, NULL, NULL) == 0)
+    {
+      Dwarf_Die cudie_mem;
+      Dwarf_Die *cudie = dwarf_offdie (dbg, old_offset + hsize, &cudie_mem);
+
+      if (cudie == NULL)
+        continue;
+      if (dwarf_tag (cudie) != DW_TAG_compile_unit)
+        continue;
+
+      const char *cuname = dwarf_diename(cudie) ?: "unknown";
+
+      Dwarf_Files *files;
+      size_t nfiles;
+      if (dwarf_getsrcfiles (cudie, &files, &nfiles) != 0)
+        continue;
+
+      // extract DW_AT_comp_dir to resolve relative file names
+      const char *comp_dir = "";
+      const char *const *dirs;
+      size_t ndirs;
+      if (dwarf_getsrcdirs (files, &dirs, &ndirs) == 0 &&
+          dirs[0] != NULL)
+        comp_dir = dirs[0];
+
+      if (verbose > 3)
+        obatched(clog) << "Searching for sources for cu=" << cuname << " comp_dir=" << comp_dir
+                       << " #files=" << nfiles << " #dirs=" << ndirs << endl;
+
+      // NB: entry 0 of the file table is the CU name itself; start at 1.
+      for (size_t f = 1; f < nfiles; f++)
+        {
+          const char *hat = dwarf_filesrc (files, f, NULL, NULL);
+          if (hat == NULL)
+            continue;
+
+          string waldo;
+          if (hat[0] == '/') // absolute
+            waldo = (string (hat));
+          else // comp_dir relative
+            waldo = (string (comp_dir) + string("/") + string (hat));
+
+          // NB: this is the 'waldo' that a dbginfo client will have
+          // to supply for us to give them the file.  The comp_dir
+          // prefixing is a definite complication.  Otherwise we'd
+          // have to return a set of comp_dirs (one per CU!) with
+          // corresponding filesrc[] names, instead of one absolute
+          // resolved set.  Maybe we'll have to do that anyway.  XXX
+
+          if (verbose > 4)
+            obatched(clog) << waldo << endl;
+
+          debug_sourcefiles.push_back (waldo);
+        }
+    }
+
+  dwarf_end(dbg);
+}
+
+
+
+// Classify the open file descriptor as ELF executable and/or debuginfo,
+// extract its build-id (as lowercase hex into `buildid`), and collect
+// DWARF-recorded source file names into `debug_sourcefiles`.
+// NB: the output parameters are only ever *updated*, never cleared —
+// callers must pre-initialize executable_p/debuginfo_p to false and
+// pass empty buildid/debug_sourcefiles.  Non-ELF or build-id-less
+// files return silently with all outputs untouched.
+static void
+elf_classify (int fd, bool &executable_p, bool &debuginfo_p, string &buildid, vector<string>& debug_sourcefiles)
+{
+  Elf *elf = elf_begin (fd, ELF_C_READ_MMAP_PRIVATE, NULL);
+  if (elf == NULL)
+    return;
+
+  try // catch our types of errors and clean up the Elf* object
+    {
+      if (elf_kind (elf) != ELF_K_ELF)
+        {
+          elf_end (elf);
+          return;
+        }
+
+      GElf_Ehdr ehdr_storage;
+      GElf_Ehdr *ehdr = gelf_getehdr (elf, &ehdr_storage);
+      if (ehdr == NULL)
+        {
+          elf_end (elf);
+          return;
+        }
+      auto elf_type = ehdr->e_type;
+
+      const void *build_id; // elfutils-owned memory
+      ssize_t sz = dwelf_elf_gnu_build_id (elf, & build_id);
+      if (sz <= 0)
+        {
+          // It's not a diagnostic-worthy error for an elf file to lack build-id.
+          // It might just be very old.
+          elf_end (elf);
+          return;
+        }
+
+      // build_id is a raw byte array; convert to hexadecimal *lowercase*
+      unsigned char* build_id_bytes = (unsigned char*) build_id;
+      for (ssize_t idx=0; idx<sz; idx++)
+        {
+          buildid += "0123456789abcdef"[build_id_bytes[idx] >> 4];
+          buildid += "0123456789abcdef"[build_id_bytes[idx] & 0xf];
+        }
+
+      // now decide whether it's an executable - namely, any allocatable section has
+      // PROGBITS;
+      if (elf_type == ET_EXEC || elf_type == ET_DYN)
+        {
+          size_t shnum;
+          int rc = elf_getshdrnum (elf, &shnum);
+          if (rc < 0)
+            throw elfutils_exception(rc, "getshdrnum");
+
+          executable_p = false;
+          for (size_t sc = 0; sc < shnum; sc++)
+            {
+              Elf_Scn *scn = elf_getscn (elf, sc);
+              if (scn == NULL)
+                continue;
+
+              GElf_Shdr shdr_mem;
+              GElf_Shdr *shdr = gelf_getshdr (scn, &shdr_mem);
+              if (shdr == NULL)
+                continue;
+
+              // allocated (loadable / vm-addr-assigned) section with available content?
+              if ((shdr->sh_type == SHT_PROGBITS) && (shdr->sh_flags & SHF_ALLOC))
+                {
+                  if (verbose > 5)
+                    obatched(clog) << "executable due to SHF_ALLOC SHT_PROGBITS sc=" << sc << endl;
+                  executable_p = true;
+                  break; // no need to keep looking for others
+                }
+            } // iterate over sections
+        } // executable_p classification
+
+      // now decide whether it's a debuginfo - namely, if it has any .debug* or .zdebug* sections
+      // logic mostly stolen from fweimer@redhat.com's elfclassify drafts
+      size_t shstrndx;
+      int rc = elf_getshdrstrndx (elf, &shstrndx);
+      if (rc < 0)
+        throw elfutils_exception(rc, "getshdrstrndx");
+
+      Elf_Scn *scn = NULL;
+      while (true)
+        {
+          scn = elf_nextscn (elf, scn);
+          if (scn == NULL)
+            break;
+          GElf_Shdr shdr_storage;
+          GElf_Shdr *shdr = gelf_getshdr (scn, &shdr_storage);
+          if (shdr == NULL)
+            break;
+          const char *section_name = elf_strptr (elf, shstrndx, shdr->sh_name);
+          if (section_name == NULL)
+            break;
+          // .debug_line (or compressed twin) additionally yields the
+          // source file list via the DWARF line tables.
+          if (strncmp(section_name, ".debug_line", 11) == 0 ||
+              strncmp(section_name, ".zdebug_line", 12) == 0)
+            {
+              debuginfo_p = true;
+              dwarf_extract_source_paths (elf, ehdr, scn, shdr, debug_sourcefiles);
+            }
+          else if (strncmp(section_name, ".debug_", 7) == 0 ||
+                   strncmp(section_name, ".zdebug_", 8) == 0)
+            {
+              debuginfo_p = true;
+              // NB: don't break; need to parse .debug_line for sources
+            }
+        }
+    }
+  catch (const reportable_exception& e)
+    {
+      e.report(clog);
+    }
+  elf_end (elf);
+}
+
+
+
+// Traverse the given directory tree, classify each regular file as
+// executable and/or debuginfo, and upsert its buildid records into the
+// database.  Debuginfo files also contribute "BOLO" (be-on-lookout)
+// records of their DWARF source file names; when a directory hierarchy
+// finishes traversal (FTS_DP), any pending BOLO sources reachable from
+// it are resolved on disk and registered as 'S' artifacts.
+static void
+scan_source_file_path (const string& dir)
+{
+  sqlite_ps ps_upsert_buildids (db, "insert or ignore into " BUILDIDS "_buildids VALUES (NULL, ?);");
+  sqlite_ps ps_upsert_files (db, "insert or ignore into " BUILDIDS "_files VALUES (NULL, ?);");
+  sqlite_ps ps_upsert (db,
+                       "insert or replace into " BUILDIDS "_norm "
+                       "(buildid, artifacttype, artifactsrc, mtime, sourcetype, source0) "
+                       "values ((select id from " BUILDIDS "_buildids where hex = ?),"
+                       " ?,"
+                       " (select id from " BUILDIDS "_files where name = ?), ?, 'F',"
+                       " (select id from " BUILDIDS "_files where name = ?));");
+  sqlite_ps ps_query (db,
+                      "select 1 from " BUILDIDS "_norm where sourcetype = 'F' and source0 = (select id from " BUILDIDS "_files where name = ?) and mtime = ?;");
+  sqlite_ps ps_cleanup (db, "delete from " BUILDIDS "_norm where mtime < ? and sourcetype = 'F' and source0 = (select id from " BUILDIDS "_files where name = ?);");
+  // find the source BOLOs
+  sqlite_ps ps_bolo_insert (db, "insert or ignore into " BUILDIDS "_bolo values (?, ?, 'F', ?);");
+  sqlite_ps ps_bolo_find (db, "select buildid,srcname from " BUILDIDS "_bolo where sourcetype = 'F' and dirname = ?;");
+  sqlite_ps ps_bolo_nuke (db, "delete from " BUILDIDS "_bolo where sourcetype = 'F' and dirname = ?;");
+
+  char * const dirs[] = { (char*) dir.c_str(), NULL };
+
+  struct timeval tv_start, tv_end;
+  unsigned fts_scanned=0, fts_cached=0, fts_debuginfo=0, fts_executable=0, fts_sourcefiles=0;
+  gettimeofday (&tv_start, NULL);
+
+  FTS *fts = fts_open (dirs,
+                       FTS_PHYSICAL /* don't follow symlinks */
+                       | FTS_XDEV /* don't cross devices/mountpoints */
+                       | FTS_NOCHDIR /* multithreaded */,
+                       NULL);
+  if (fts == NULL)
+    {
+      obatched(cerr) << "cannot fts_open " << dir << endl;
+      return;
+    }
+
+  vector<string> directory_stack; // to allow knowledge of fts $DIR
+  FTSENT *f;
+  while ((f = fts_read (fts)) != NULL)
+    {
+      fts_scanned ++;
+      if (interrupted)
+        break;
+
+      if (verbose > 3)
+        obatched(clog) << "fts traversing " << f->fts_path << endl;
+
+      try
+        {
+          /* Found a file.  Convert it to an absolute path, so
+             the buildid database does not have relative path
+             names that are unresolvable from a subsequent run
+             in a different cwd. */
+          char *rp = realpath(f->fts_path, NULL);
+          if (rp == NULL)
+            throw libc_exception(errno, "fts realpath " + string(f->fts_path));
+          string rps = string(rp);
+          free (rp);
+
+          int rc = 0;
+          switch (f->fts_info)
+            {
+            case FTS_D:
+              directory_stack.push_back (rps);
+              break;
+
+            case FTS_DP:
+              directory_stack.pop_back ();
+              // Finished traversing this directory (hierarchy).  Check for any source files that can be
+              // reached from here.
+
+              sqlite3_reset (ps_bolo_find);
+              rc = sqlite3_bind_text (ps_bolo_find, 1, rps.c_str(), -1, SQLITE_TRANSIENT);
+              if (rc != SQLITE_OK)
+                throw sqlite_exception(rc, "sqlite3 bolo-find bind1");
+
+              while (1)
+                {
+                  rc = sqlite3_step (ps_bolo_find);
+                  if (rc == SQLITE_DONE)
+                    break;
+                  else if (rc == SQLITE_ROW) // i.e., a result, as opposed to DONE (no results)
+                    {
+                      string buildid = string((const char*) sqlite3_column_text (ps_bolo_find, 0) ?: "NULL"); // NULL can't happen
+                      string dwarfsrc = string((const char*) sqlite3_column_text (ps_bolo_find, 1) ?: "NULL"); // NULL can't happen
+
+                      string srcpath;
+                      if (dwarfsrc.size() > 0 && dwarfsrc[0] == '/') // src file name is absolute, use as is
+                        srcpath = dwarfsrc;
+                      else
+                        srcpath = rps + string("/") + dwarfsrc; // XXX: should not happen; elf_classify only gives back /absolute files
+
+                      char *srp = realpath(srcpath.c_str(), NULL);
+                      if (srp == NULL)
+                        continue; // unresolvable files are not a serious problem
+                      // throw libc_exception(errno, "fts realpath " + srcpath);
+                      string srps = string(srp);
+                      free (srp);
+
+                      struct stat sfs;
+                      rc = stat(srps.c_str(), &sfs);
+                      if (rc == 0)
+                        {
+                          if (verbose > 2)
+                            obatched(clog) << "recorded buildid=" << buildid << " file=" << srps
+                                           << " mtime=" << sfs.st_mtime
+                                           << " as source " << dwarfsrc << endl;
+
+                          // register this file name in the interning table
+                          sqlite3_reset (ps_upsert_files);
+                          rc = sqlite3_bind_text (ps_upsert_files, 1, srps.c_str(), -1, SQLITE_TRANSIENT);
+                          if (rc != SQLITE_OK)
+                            throw sqlite_exception(rc, "sqlite3 bolo-file bind");
+                          rc = sqlite3_step (ps_upsert_files);
+                          if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                            throw sqlite_exception(rc, "sqlite3 bolo-file execute");
+
+                          // register the dwarfsrc name in the interning table too
+                          sqlite3_reset (ps_upsert_files);
+                          rc = sqlite3_bind_text (ps_upsert_files, 1, dwarfsrc.c_str(), -1, SQLITE_TRANSIENT);
+                          if (rc != SQLITE_OK)
+                            throw sqlite_exception(rc, "sqlite3 bolo-file bind");
+                          rc = sqlite3_step (ps_upsert_files);
+                          if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                            throw sqlite_exception(rc, "sqlite3 bolo-file execute");
+
+                          // record the source match: buildid + dwarf name -> resolved file
+                          sqlite3_reset (ps_upsert);
+                          rc = sqlite3_bind_text (ps_upsert, 1, buildid.c_str(), -1, SQLITE_TRANSIENT);
+                          if (rc != SQLITE_OK)
+                            throw sqlite_exception(rc, "sqlite3 bolo upsert bind1");
+                          rc = sqlite3_bind_text (ps_upsert, 2, "S", -1, SQLITE_STATIC);
+                          if (rc != SQLITE_OK)
+                            throw sqlite_exception(rc, "sqlite3 bolo upsert bind2");
+                          rc = sqlite3_bind_text (ps_upsert, 3, dwarfsrc.c_str(), -1, SQLITE_TRANSIENT);
+                          if (rc != SQLITE_OK)
+                            throw sqlite_exception(rc, "sqlite3 bolo upsert bind3");
+                          rc = sqlite3_bind_int64 (ps_upsert, 4, (int64_t) sfs.st_mtime);
+                          if (rc != SQLITE_OK)
+                            throw sqlite_exception(rc, "sqlite3 bolo upsert bind4");
+                          rc = sqlite3_bind_text (ps_upsert, 5, srps.c_str(), -1, SQLITE_TRANSIENT);
+                          if (rc != SQLITE_OK)
+                            throw sqlite_exception(rc, "sqlite3 bolo upsert bind5");
+
+                          rc = sqlite3_step (ps_upsert);
+                          if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                            throw sqlite_exception(rc, "sqlite3 bolo upsert execute");
+                        }
+                    }
+                  else
+                    throw sqlite_exception(rc, "sqlite3 bolo-find step");
+                } // loop over bolo records
+
+              if (verbose > 2)
+                obatched(clog) << "nuking bolo for directory=" << rps << endl;
+
+              // ditch matching bolo records so we don't repeat search
+              sqlite3_reset (ps_bolo_nuke);
+              rc = sqlite3_bind_text (ps_bolo_nuke, 1, rps.c_str(), -1, SQLITE_TRANSIENT);
+              if (rc != SQLITE_OK)
+                throw sqlite_exception(rc, "sqlite3 bolo-nuke bind1");
+              rc = sqlite3_step (ps_bolo_nuke);
+              if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                throw sqlite_exception(rc, "sqlite3 bolo-nuke execute");
+
+              break;
+
+            case FTS_F:
+              {
+                /* See if we know of it already. */
+                sqlite3_reset (ps_query); // to allow rebinding / reexecution
+                int rc = sqlite3_bind_text (ps_query, 1, rps.c_str(), -1, SQLITE_TRANSIENT);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 file query bind1");
+                rc = sqlite3_bind_int64 (ps_query, 2, (int64_t) f->fts_statp->st_mtime);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 file query bind2");
+                rc = sqlite3_step (ps_query);
+                if (rc == SQLITE_ROW) // i.e., a result, as opposed to DONE (no results)
+                  // no need to recheck a file/version we already know
+                  // specifically, no need to elf-begin a file we already determined is non-elf
+                  // (so is stored with buildid=NULL)
+                  {
+                    fts_cached ++;
+                    continue;
+                  }
+
+                bool executable_p = false, debuginfo_p = false; // E and/or D
+                string buildid;
+                vector<string> sourcefiles;
+
+                int fd = open (rps.c_str(), O_RDONLY);
+                try
+                  {
+                    if (fd >= 0)
+                      elf_classify (fd, executable_p, debuginfo_p, buildid, sourcefiles);
+                    else
+                      throw libc_exception(errno, string("open ") + rps);
+                  }
+
+                // NB: we catch exceptions from elf_classify here too, so that we can
+                // cache the corrupt-elf case (!executable_p && !debuginfo_p) just below,
+                // just as if we had an EPERM error from open(2).
+
+                catch (const reportable_exception& e)
+                  {
+                    e.report(clog);
+                  }
+
+                if (fd >= 0)
+                  close (fd);
+
+                // register this file name in the interning table
+                sqlite3_reset (ps_upsert_files);
+                rc = sqlite3_bind_text (ps_upsert_files, 1, rps.c_str(), -1, SQLITE_TRANSIENT);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 upsert-file bind");
+                rc = sqlite3_step (ps_upsert_files);
+                if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                  throw sqlite_exception(rc, "sqlite3 upsert-file execute");
+
+                if (buildid == "")
+                  {
+                    sqlite3_reset (ps_upsert); // to allow rebinding / reexecution
+                    rc = sqlite3_bind_null (ps_upsert, 1);
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert bind1");
+                    // no point storing an elf file without buildid
+                    executable_p = false;
+                    debuginfo_p = false;
+                  }
+                else
+                  {
+                    // register this build-id in the interning table
+                    sqlite3_reset (ps_upsert_buildids);
+                    rc = sqlite3_bind_text (ps_upsert_buildids, 1, buildid.c_str(), -1, SQLITE_TRANSIENT);
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert-buildid bind");
+                    rc = sqlite3_step (ps_upsert_buildids);
+                    if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                      throw sqlite_exception(rc, "sqlite3 upsert-buildid execute");
+
+                    sqlite3_reset (ps_upsert); // to allow rebinding / reexecution
+                    rc = sqlite3_bind_text (ps_upsert, 1, buildid.c_str(), -1, SQLITE_TRANSIENT);
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert bind1");
+                  }
+
+                // artifacttype column 2 set later
+                rc = sqlite3_bind_null (ps_upsert, 3); // no artifactsrc for D/E
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 upsert bind3");
+                rc = sqlite3_bind_int64 (ps_upsert, 4, (int64_t) f->fts_statp->st_mtime);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 upsert bind4");
+                rc = sqlite3_bind_text (ps_upsert, 5, rps.c_str(), -1, SQLITE_TRANSIENT);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 upsert bind5");
+
+                if (executable_p)
+                  {
+                    fts_executable ++;
+                    rc = sqlite3_bind_text (ps_upsert, 2, "E", -1, SQLITE_STATIC);
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert-E bind2");
+                    rc = sqlite3_step (ps_upsert);
+                    if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                      throw sqlite_exception(rc, "sqlite3 upsert-E execute");
+                  }
+
+                if (debuginfo_p)
+                  {
+                    fts_debuginfo ++;
+                    sqlite3_reset (ps_upsert); // to allow rebinding / reexecution
+                    rc = sqlite3_bind_text (ps_upsert, 2, "D", -1, SQLITE_STATIC);
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert-D bind2");
+                    rc = sqlite3_step (ps_upsert);
+                    if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                      throw sqlite_exception(rc, "sqlite3 upsert-D execute");
+                  }
+
+                if (sourcefiles.size() && buildid != "")
+                  {
+                    fts_sourcefiles += sourcefiles.size();
+                    // NB: if the scan root itself is a regular file, no FTS_D
+                    // was ever pushed; fall back to the scan root directory
+                    // rather than dereferencing an empty stack.
+                    string sourcedir = directory_stack.empty() ? dir : directory_stack.back ();
+
+                    for (auto sf : sourcefiles)
+                      {
+                        sqlite3_reset (ps_bolo_insert);
+                        rc = sqlite3_bind_text (ps_bolo_insert, 1, buildid.c_str(), -1, SQLITE_TRANSIENT);
+                        if (rc != SQLITE_OK)
+                          throw sqlite_exception(rc, "sqlite3 upsert-bolo bind1");
+                        rc = sqlite3_bind_text (ps_bolo_insert, 2, sf.c_str(), -1, SQLITE_TRANSIENT);
+                        if (rc != SQLITE_OK)
+                          throw sqlite_exception(rc, "sqlite3 upsert-bolo bind2");
+                        rc = sqlite3_bind_text (ps_bolo_insert, 3, sourcedir.c_str(), -1, SQLITE_TRANSIENT);
+                        if (rc != SQLITE_OK)
+                          throw sqlite_exception(rc, "sqlite3 upsert-bolo bind3");
+
+                        rc = sqlite3_step (ps_bolo_insert);
+                        if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                          throw sqlite_exception(rc, "sqlite3 upsert-bolo execute");
+                      }
+                  }
+
+                if (! (executable_p || debuginfo_p)) // negative hit
+                  {
+                    rc = sqlite3_bind_null (ps_upsert, 2);
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert-NULL bind2");
+                    rc = sqlite3_step (ps_upsert);
+                    if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                      throw sqlite_exception(rc, "sqlite3 upsert-NULL execute");
+                  }
+
+                // clean up any older entries for this file, in case it was replaced/recompiled to new buildid
+                sqlite3_reset (ps_cleanup);
+                rc = sqlite3_bind_int64 (ps_cleanup, 1, (int64_t) f->fts_statp->st_mtime);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 cleanup bind1");
+                rc = sqlite3_bind_text (ps_cleanup, 2, rps.c_str(), -1, SQLITE_TRANSIENT);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 cleanup bind2");
+                rc = sqlite3_step (ps_cleanup);
+                if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                  throw sqlite_exception(rc, "sqlite3 cleanup exec");
+
+                if (verbose > 2)
+                  obatched(clog) << "recorded buildid=" << buildid << " file=" << rps
+                                 << " mtime=" << f->fts_statp->st_mtime << " as "
+                                 << (executable_p ? "executable" : "not executable") << " and "
+                                 << (debuginfo_p ? "debuginfo" : "not debuginfo") << endl;
+              }
+              break;
+
+            case FTS_ERR:
+            case FTS_NS:
+              throw libc_exception(f->fts_errno, string("fts traversal ") + string(f->fts_path));
+
+            default:
+            case FTS_SL: /* NB: don't enter symbolic links into the database */
+              break;
+            }
+        }
+      catch (const reportable_exception& e)
+        {
+          e.report(clog);
+        }
+    }
+  fts_close (fts);
+
+  gettimeofday (&tv_end, NULL);
+  double deltas = (tv_end.tv_sec - tv_start.tv_sec) + (tv_end.tv_usec - tv_start.tv_usec)*0.000001;
+
+  if (verbose > 1)
+    obatched(clog) << "fts traversed " << dir << " in " << deltas << "s, scanned=" << fts_scanned
+                   << ", cached=" << fts_cached << ", debuginfo=" << fts_debuginfo
+                   << ", executable=" << fts_executable << ", source=" << fts_sourcefiles << endl;
+}
+
+
+// Thread entry point: periodically rescan the file-path source given in
+// arg (a const char* directory name).  A scan runs on the first pass and
+// then once every rescan_s seconds (every second when rescan_s is zero);
+// the loop exits when the global `interrupted` flag is raised.
+static void*
+thread_main_scan_source_file_path (void* arg)
+{
+  const string dir ((const char*) arg);
+  if (verbose > 2)
+    obatched(clog) << "file-path scanning " << dir << endl;
+
+  unsigned seconds_since_scan = 0;
+  while (! interrupted)
+    {
+      if (seconds_since_scan == 0)
+        try
+          {
+            scan_source_file_path (dir);
+          }
+        catch (const sqlite_exception& e)
+          {
+            // keep the thread alive; just log database trouble
+            obatched(cerr) << e.message << endl;
+          }
+      sleep (1);
+      seconds_since_scan ++;
+      if (rescan_s)
+        seconds_since_scan %= rescan_s;
+    }
+
+  return 0;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+
+// Analyze given *.rpm file of given age; record buildids / exec/debuginfo-ness of its
+// constituent files with given upsert statement.
+// rps: absolute path of the *.rpm file; mtime: its stat mtime;
+// fts_executable/fts_debuginfo: per-rpm hit counters updated in place.
+// The rpm payload is streamed through rpm2cpio into libarchive, each
+// regular member is extracted to an unlinked temp file, classified with
+// elf_classify, and recorded via the caller-supplied ps_upsert.
+static void
+rpm_classify (const string& rps, sqlite_ps& ps_upsert, time_t mtime,
+              unsigned& fts_executable, unsigned& fts_debuginfo)
+{
+  string popen_cmd = string("/usr/bin/rpm2cpio " + /* XXX sh-meta-escape */ rps);
+  FILE* fp = popen (popen_cmd.c_str(), "r"); // "e" O_CLOEXEC?
+  if (fp == NULL)
+    throw libc_exception (errno, string("popen ") + popen_cmd);
+  defer_dtor<FILE*,int> fp_closer (fp, pclose);
+
+  struct archive *a;
+  a = archive_read_new();
+  if (a == NULL)
+    throw archive_exception("cannot create archive reader");
+  defer_dtor<struct archive*,int> archive_closer (a, archive_read_free);
+
+  int rc = archive_read_support_format_cpio(a);
+  if (rc != ARCHIVE_OK)
+    throw archive_exception(a, "cannot select cpio format");
+  rc = archive_read_support_filter_all(a); // XXX: or _none()? are these cpio's compressed at this point?
+  if (rc != ARCHIVE_OK)
+    throw archive_exception(a, "cannot select all filters");
+
+  rc = archive_read_open_FILE (a, fp);
+  if (rc != ARCHIVE_OK)
+    throw archive_exception(a, "cannot open archive from rpm2cpio pipe");
+
+  if (verbose > 3)
+    obatched(clog) << "rpm2cpio|libarchive scanning " << rps << endl;
+
+  while(1) // parse cpio archive entries
+    {
+      try
+        {
+          struct archive_entry *e;
+          rc = archive_read_next_header (a, &e);
+          if (rc != ARCHIVE_OK)
+            break;
+
+          if (! S_ISREG(archive_entry_mode (e))) // skip non-files completely
+            continue;
+
+          string fn = archive_entry_pathname (e);
+
+          if (verbose > 3)
+            obatched(clog) << "rpm2cpio|libarchive checking " << fn << endl;
+
+          // extract this file to a temporary file
+          char tmppath[PATH_MAX] = "/tmp/dbgserver.XXXXXX"; // XXX: $TMP_DIR etc.
+          int fd = mkstemp (tmppath);
+          if (fd < 0)
+            throw libc_exception (errno, "cannot create temporary file");
+          unlink (tmppath); // unlink now so OS will release the file as soon as we close the fd
+          defer_dtor<int,int> minifd_closer (fd, close);
+
+          rc = archive_read_data_into_fd (a, fd);
+          if (rc != ARCHIVE_OK)
+            throw archive_exception(a, "cannot extract file");
+
+          // finally ... time to run elf_classify on this bad boy and update the database
+          bool executable_p = false, debuginfo_p = false;
+          string buildid;
+          vector<string> sourcefiles;
+          elf_classify (fd, executable_p, debuginfo_p, buildid, sourcefiles);
+          // NB: might throw
+
+          // NB: we record only executable_p || debuginfo_p case here,
+          // not the 'neither' case.
+
+          // NOTE(review): the bind indices below refer to the caller's
+          // multi-statement ps_upsert (see scan_source_rpm_path); the
+          // error messages now name the actual parameter index bound.
+          sqlite3_reset (ps_upsert); // to allow rebinding / reexecution
+          rc = sqlite3_bind_text (ps_upsert, 1, buildid.c_str(), -1, SQLITE_TRANSIENT);
+          if (rc != SQLITE_OK)
+            throw sqlite_exception(rc, "sqlite3 upsert bind1");
+          rc = sqlite3_bind_text (ps_upsert, 4, buildid.c_str(), -1, SQLITE_TRANSIENT);
+          if (rc != SQLITE_OK)
+            throw sqlite_exception(rc, "sqlite3 upsert bind4");
+          rc = sqlite3_bind_int64 (ps_upsert, 6, (int64_t) mtime); // XXX: caller could do this for us
+          if (rc != SQLITE_OK)
+            throw sqlite_exception(rc, "sqlite3 upsert bind6");
+          rc = sqlite3_bind_text (ps_upsert, 3, rps.c_str(), -1, SQLITE_TRANSIENT);
+          if (rc != SQLITE_OK)
+            throw sqlite_exception(rc, "sqlite3 upsert bind3");
+          rc = sqlite3_bind_text (ps_upsert, 7, rps.c_str(), -1, SQLITE_TRANSIENT);
+          if (rc != SQLITE_OK)
+            throw sqlite_exception(rc, "sqlite3 upsert bind7");
+          rc = sqlite3_bind_text (ps_upsert, 8, fn.c_str(), -1, SQLITE_TRANSIENT);
+          if (rc != SQLITE_OK)
+            throw sqlite_exception(rc, "sqlite3 upsert bind8");
+          rc = sqlite3_bind_text (ps_upsert, 2, fn.c_str(), -1, SQLITE_TRANSIENT);
+          if (rc != SQLITE_OK)
+            throw sqlite_exception(rc, "sqlite3 upsert bind2");
+
+          if (executable_p)
+            {
+              fts_executable ++;
+              rc = sqlite3_bind_text (ps_upsert, 5, "E", -1, SQLITE_STATIC);
+              if (rc != SQLITE_OK)
+                throw sqlite_exception(rc, "sqlite3 upsert-E bind5");
+              rc = sqlite3_step (ps_upsert);
+              if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                throw sqlite_exception(rc, "sqlite3 upsert-E execute");
+            }
+
+          if (debuginfo_p)
+            {
+              fts_debuginfo ++;
+              sqlite3_reset (ps_upsert); // to allow rebinding / reexecution
+              rc = sqlite3_bind_text (ps_upsert, 5, "D", -1, SQLITE_STATIC);
+              if (rc != SQLITE_OK)
+                throw sqlite_exception(rc, "sqlite3 upsert-D bind5");
+              rc = sqlite3_step (ps_upsert);
+              if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                throw sqlite_exception(rc, "sqlite3 upsert-D execute");
+            }
+
+          if ((verbose > 2) && (executable_p || debuginfo_p))
+            obatched(clog) << "recorded buildid=" << buildid << " rpm=" << rps << " file=" << fn
+                           << " mtime=" << mtime << " as "
+                           << (executable_p ? "executable" : "not executable") << " and "
+                           << (debuginfo_p ? "debuginfo" : "not debuginfo") << endl;
+
+        }
+      catch (const reportable_exception& e)
+        {
+          e.report(clog);
+        }
+    }
+}
+
+
+
+// scan for *.rpm files
+// Traverse the given directory tree looking for *.rpm files, classify
+// their contents via rpm_classify, and record rpm-backed artifacts in
+// the database with sourcetype 'R' (rpm path in source0, member file
+// name in source1), matching the 'R' cache query below and the
+// handle_buildid_r_match dispatch.
+static void
+scan_source_rpm_path (const string& dir)
+{
+  // NOTE(review): this single sqlite_ps carries several SQL statements
+  // and binds parameter indices across them (see rpm_classify); confirm
+  // sqlite_ps really prepares/executes all statements, since plain
+  // sqlite3_prepare_v2 compiles only the first.
+  sqlite_ps ps_upsert (db,
+                       "insert or ignore into " BUILDIDS "_buildids VALUES (NULL, ?);"
+                       "insert or ignore into " BUILDIDS "_files VALUES (NULL, ?);"
+                       "insert or ignore into " BUILDIDS "_files VALUES (NULL, ?);"
+                       "insert or replace into " BUILDIDS "_norm (buildid, artifacttype, mtime," // XXX: artifactsrc
+                       // NB: sourcetype must be 'R' (not 'F') so that the cache
+                       // query below hits and handle_buildid_r_match serves these.
+                       "sourcetype, source0, source1) values ((select id from " BUILDIDS "_buildids where hex = ?), ?, ?, 'R',"
+                       "(select id from " BUILDIDS "_files where name = ?),"
+                       "(select id from " BUILDIDS "_files where name = ?));");
+  sqlite_ps ps_query (db,
+                      "select 1 from " BUILDIDS " where sourcetype = 'R' and source0 = ? and mtime = ?;");
+
+  char * const dirs[] = { (char*) dir.c_str(), NULL };
+
+  struct timeval tv_start, tv_end;
+  unsigned fts_scanned=0, fts_cached=0, fts_debuginfo=0, fts_executable=0, fts_rpm = 0;
+  gettimeofday (&tv_start, NULL);
+
+  FTS *fts = fts_open (dirs,
+                       FTS_PHYSICAL /* don't follow symlinks */
+                       | FTS_XDEV /* don't cross devices/mountpoints */
+                       | FTS_NOCHDIR /* multithreaded */,
+                       NULL);
+  if (fts == NULL)
+    {
+      obatched(cerr) << "cannot fts_open " << dir << endl;
+      return;
+    }
+
+  FTSENT *f;
+  while ((f = fts_read (fts)) != NULL)
+    {
+      fts_scanned ++;
+      if (interrupted)
+        break;
+
+      if (verbose > 3)
+        obatched(clog) << "fts/rpm traversing " << f->fts_path << endl;
+
+      try
+        {
+          switch (f->fts_info)
+            {
+            case FTS_F:
+              {
+                /* Found a file.  Convert it to an absolute path, so
+                   the buildid database does not have relative path
+                   names that are unresolvable from a subsequent run
+                   in a different cwd. */
+                char *rp = realpath(f->fts_path, NULL);
+                if (rp == NULL)
+                  throw libc_exception(errno, "fts realpath " + string(f->fts_path));
+                string rps = string(rp);
+                free (rp);
+
+                // heuristic: reject if file name does not end with ".rpm"
+                // (alternative: try opening with librpm etc., caching)
+                string suffix = ".rpm";
+                if (rps.size() < suffix.size() ||
+                    rps.substr(rps.size()-suffix.size()) != suffix)
+                  // !equal(rps.begin()+rps.size()-suffix.size(), rps.end(), suffix.begin()))
+                  continue;
+                fts_rpm ++;
+
+                /* See if we know of it already. */
+                sqlite3_reset (ps_query); // to allow rebinding / reexecution
+                int rc = sqlite3_bind_text (ps_query, 1, rps.c_str(), -1, SQLITE_TRANSIENT);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 file query bind1");
+                rc = sqlite3_bind_int64 (ps_query, 2, (int64_t) f->fts_statp->st_mtime);
+                if (rc != SQLITE_OK)
+                  throw sqlite_exception(rc, "sqlite3 file query bind2");
+                rc = sqlite3_step (ps_query);
+                if (rc == SQLITE_ROW) // i.e., a result, as opposed to DONE (no results)
+                  // no need to recheck a file/version we already know
+                  // specifically, no need to elf-begin a file we already determined is non-elf
+                  // (so is stored with buildid=NULL)
+                  {
+                    fts_cached ++;
+                    continue;
+                  }
+
+                // extract the rpm contents via popen("rpm2cpio") | libarchive | loop-of-elf_classify()
+                unsigned my_fts_executable = 0, my_fts_debuginfo = 0;
+                try
+                  {
+                    rpm_classify (rps, ps_upsert, f->fts_statp->st_mtime, my_fts_executable, my_fts_debuginfo);
+                  }
+                catch (const reportable_exception& e)
+                  {
+                    e.report(clog);
+                  }
+
+                fts_executable += my_fts_executable;
+                fts_debuginfo += my_fts_debuginfo;
+
+                // unreadable or corrupt or non-ELF-carrying rpm: cache negative
+                if (my_fts_executable == 0 && my_fts_debuginfo == 0)
+                  {
+                    sqlite3_reset (ps_upsert); // to allow rebinding / reexecution
+                    rc = sqlite3_bind_null (ps_upsert, 1); // buildid
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert-NULL bind1");
+                    rc = sqlite3_bind_null (ps_upsert, 2); // artifacttype
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert-NULL bind2");
+                    rc = sqlite3_bind_int64 (ps_upsert, 3, (int64_t) f->fts_statp->st_mtime); // mtime
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert bind3");
+                    rc = sqlite3_bind_text (ps_upsert, 4, rps.c_str(), -1, SQLITE_TRANSIENT); // source0
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert bind4");
+                    rc = sqlite3_bind_null (ps_upsert, 5); // source1
+                    if (rc != SQLITE_OK)
+                      throw sqlite_exception(rc, "sqlite3 upsert-NULL bind5");
+                    rc = sqlite3_step (ps_upsert);
+                    if (rc != SQLITE_OK && rc != SQLITE_DONE)
+                      throw sqlite_exception(rc, "sqlite3 upsert-NULL execute");
+                  }
+              }
+              break;
+
+            case FTS_ERR:
+            case FTS_NS:
+              throw libc_exception(f->fts_errno, string("fts traversal ") + string(f->fts_path));
+
+            default:
+            case FTS_SL: /* NB: don't enter symbolic links into the database */
+              break;
+            }
+        }
+      catch (const reportable_exception& e)
+        {
+          e.report(clog);
+        }
+    }
+  fts_close (fts);
+
+  gettimeofday (&tv_end, NULL);
+  double deltas = (tv_end.tv_sec - tv_start.tv_sec) + (tv_end.tv_usec - tv_start.tv_usec)*0.000001;
+
+  if (verbose > 1)
+    obatched(clog) << "fts/rpm traversed " << dir << " in " << deltas << "s, scanned=" << fts_scanned
+                   << ", rpm=" << fts_rpm << ", cached=" << fts_cached << ", debuginfo=" << fts_debuginfo
+                   << ", executable=" << fts_executable << endl;
+}
+
+
+
+static void*
+thread_main_scan_source_rpm_path (void* arg)
+{
+  const string rpm_dir ((const char*) arg); // arg: NUL-terminated path string, owned by caller
+  if (verbose > 2)
+    obatched(clog) << "rpm-path scanning " << rpm_dir << endl;
+
+  unsigned seconds_since_rescan = 0; // ticks once per second; 0 => time to rescan
+  while (! interrupted)
+    {
+      try
+        {
+          if (seconds_since_rescan == 0)
+            scan_source_rpm_path (rpm_dir);
+        }
+      catch (const sqlite_exception& e)
+        {
+          obatched(cerr) << e.message << endl; // log and keep polling; db errors are transient
+        }
+      sleep (1);
+      seconds_since_rescan ++;
+      if (rescan_s)
+        seconds_since_rescan %= rescan_s; // wrap to 0 every rescan_s seconds
+    }
+
+  return 0;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+static void
+signal_handler (int /* sig */)
+{
+  interrupted ++; // flag polled by the main loop and the scanner threads
+
+  // NB: don't do anything else in here -- only async-signal-safe work is allowed
+}
+
+
+
+int
+main (int argc, char *argv[])
+{
+  (void) setlocale (LC_ALL, "");
+  (void) bindtextdomain (PACKAGE_TARNAME, LOCALEDIR);
+  (void) textdomain (PACKAGE_TARNAME);
+
+  /* Tell the library which version we are expecting.  */
+  elf_version (EV_CURRENT);
+
+  /* Set default values. */
+  http_port = 8002;
+  db_path = string(getenv("HOME") ?: "/") + string("/.dbgserver.sqlite"); /* XDG? */
+
+  /* Parse and process arguments.  */
+  int remaining;
+  (void) argp_parse (&argp, argc, argv, ARGP_IN_ORDER|ARGP_NO_ARGS, &remaining, NULL);
+  if (remaining != argc)
+      error (EXIT_FAILURE, 0,
+             _("unexpected argument: %s"), argv[remaining]);
+
+  (void) signal (SIGPIPE, SIG_IGN); // microhttpd can generate it incidentally, ignore
+  (void) signal (SIGINT, signal_handler); // ^C
+  (void) signal (SIGHUP, signal_handler); // EOF
+  (void) signal (SIGTERM, signal_handler); // systemd
+
+  /* Get database ready. */
+  int rc;
+  rc = sqlite3_open_v2 (db_path.c_str(), &db, (SQLITE_OPEN_READWRITE
+                                               |SQLITE_OPEN_CREATE
+                                               |SQLITE_OPEN_FULLMUTEX), /* thread-safe */
+                        NULL);
+  if (rc == SQLITE_CORRUPT)
+    {
+      (void) unlink (db_path.c_str());
+      error (EXIT_FAILURE, 0,
+             _("cannot open %s, deleted database: %s"), db_path.c_str(), sqlite3_errmsg(db));
+    }
+  else if (rc)
+    {
+      error (EXIT_FAILURE, 0,
+             _("cannot open %s, database: %s"), db_path.c_str(), sqlite3_errmsg(db));
+    }
+
+  obatched(clog) << "Opened database " << db_path << endl;
+
+  if (verbose > 3)
+    obatched(clog) << "DDL:\n" << DBGSERVER_SQLITE_DDL << endl;
+
+  rc = sqlite3_exec (db, DBGSERVER_SQLITE_DDL, NULL, NULL, NULL);
+  if (rc != SQLITE_OK)
+    {
+      error (EXIT_FAILURE, 0,
+             _("cannot run database schema ddl: %s"), sqlite3_errmsg(db));
+    }
+
+  if (verbose) // report database stats
+    try
+      {
+        sqlite_ps ps_query (db,
+                            "select sourcetype, artifacttype, count(*) from " BUILDIDS
+                            " group by sourcetype, artifacttype");
+
+        obatched(clog) << "Database statistics:" << endl;
+        obatched(clog) << "source" << "\t" << "type" << "\t" << "count" << endl;
+        while (1)
+          {
+            rc = sqlite3_step (ps_query);
+            if (rc == SQLITE_DONE) break;
+            if (rc != SQLITE_ROW)
+              throw sqlite_exception(rc, "step");
+
+            obatched(clog) << (sqlite3_column_text(ps_query, 0) ?: (const unsigned char*) "NULL")
+                           << "\t"
+                           << (sqlite3_column_text(ps_query, 1) ?: (const unsigned char*) "NULL")
+                           << "\t"
+                           << (sqlite3_column_text(ps_query, 2) ?: (const unsigned char*) "NULL")
+                           << endl;
+          }
+      }
+    catch (const reportable_exception& e)
+      {
+        e.report(clog);
+      }
+
+  for (auto&& it : source_file_paths)
+    {
+      pthread_t pt;
+      int rc = pthread_create (& pt, NULL, thread_main_scan_source_file_path, (void*) it.c_str());
+      if (rc != 0) // NB: pthread_create returns a positive errno value on failure, never -1
+        error (0, 0, "Warning: cannot spawn thread (%d) to scan source files %s\n", rc, it.c_str());
+      else
+        source_file_scanner_threads.push_back(pt);
+    }
+
+  for (auto&& it : source_rpm_paths)
+    {
+      pthread_t pt;
+      int rc = pthread_create (& pt, NULL, thread_main_scan_source_rpm_path, (void*) it.c_str());
+      if (rc != 0) // NB: pthread_create returns a positive errno value on failure, never -1
+        error (0, 0, "Warning: cannot spawn thread (%d) to scan source rpms %s\n", rc, it.c_str());
+      else
+        source_rpm_scanner_threads.push_back(pt);
+    }
+
+
+  // Start httpd server threads.  Separate pool for IPv4 and IPv6, in
+  // case the host only has one protocol stack.
+  MHD_Daemon *d4 = MHD_start_daemon (MHD_USE_THREAD_PER_CONNECTION
+#if MHD_VERSION >= 0x00095300
+                                     | MHD_USE_INTERNAL_POLLING_THREAD
+#else
+                                     | MHD_USE_SELECT_INTERNALLY
+#endif
+                                     | MHD_USE_DEBUG, /* report errors to stderr */
+                                     http_port,
+                                     NULL, NULL, /* default accept policy */
+                                     handler_cb, NULL, /* handler callback */
+                                     MHD_OPTION_END);
+  MHD_Daemon *d6 = MHD_start_daemon (MHD_USE_THREAD_PER_CONNECTION
+#if MHD_VERSION >= 0x00095300
+                                     | MHD_USE_INTERNAL_POLLING_THREAD
+#else
+                                     | MHD_USE_SELECT_INTERNALLY
+#endif
+                                     | MHD_USE_IPv6
+                                     | MHD_USE_DEBUG, /* report errors to stderr */
+                                     http_port,
+                                     NULL, NULL, /* default accept policy */
+                                     handler_cb, NULL, /* handler callback */
+                                     MHD_OPTION_END);
+
+  if (d4 == NULL && d6 == NULL) // neither ipv4 nor ipv6? boo
+    {
+      sqlite3_close (db);
+      error (EXIT_FAILURE, 0, _("cannot start http server at port %d"), http_port);
+    }
+
+  obatched(clog) << "Started http server on "
+                 << (d4 != NULL ? "IPv4 " : "")
+                 << (d6 != NULL ? "IPv6 " : "")
+                 << "port=" << http_port << endl;
+
+  /* Trivial main loop! */
+  while (! interrupted)
+    pause ();
+
+  if (verbose)
+    obatched(clog) << "Stopping" << endl;
+
+  /* Stop all the web service threads. */
+  if (d4) MHD_stop_daemon (d4);
+  if (d6) MHD_stop_daemon (d6);
+
+  /* Join any source scanning threads. */
+  for (auto&& it : source_file_scanner_threads)
+    pthread_join (it, NULL);
+  for (auto&& it : source_rpm_scanner_threads)
+    pthread_join (it, NULL);
+
+  /* With all threads known dead, we can close the db handle. */
+  sqlite3_close (db);
+
+  return 0;
+}
diff --git a/dbgserver/libdbgserver.map b/dbgserver/libdbgserver.map
new file mode 100644
index 00000000..112c9ca9
--- /dev/null
+++ b/dbgserver/libdbgserver.map
@@ -0,0 +1,7 @@
+ELFUTILS_0 { };
+ELFUTILS_0.177 {
+ global:
+ dbgserver_find_debuginfo;
+ dbgserver_find_executable;
+ dbgserver_find_source;
+} ELFUTILS_0;
diff --git a/libdwfl/Makefile.am b/libdwfl/Makefile.am
index 89ca92ed..d35598ae 100644
--- a/libdwfl/Makefile.am
+++ b/libdwfl/Makefile.am
@@ -31,7 +31,7 @@
##
include $(top_srcdir)/config/eu.am
AM_CPPFLAGS += -I$(srcdir) -I$(srcdir)/../libelf -I$(srcdir)/../libebl \
- -I$(srcdir)/../libdw -I$(srcdir)/../libdwelf
+ -I$(srcdir)/../libdw -I$(srcdir)/../libdwelf -I$(srcdir)/../dbgserver
VERSION = 1
noinst_LIBRARIES = libdwfl.a
@@ -39,6 +39,7 @@ noinst_LIBRARIES += libdwfl_pic.a
pkginclude_HEADERS = libdwfl.h
+
libdwfl_a_SOURCES = dwfl_begin.c dwfl_end.c dwfl_error.c dwfl_version.c \
dwfl_module.c dwfl_report_elf.c relocate.c \
dwfl_module_build_id.c dwfl_module_report_build_id.c \
diff --git a/libdwfl/dwfl_build_id_find_elf.c b/libdwfl/dwfl_build_id_find_elf.c
index cc6c3f62..158cf280 100644
--- a/libdwfl/dwfl_build_id_find_elf.c
+++ b/libdwfl/dwfl_build_id_find_elf.c
@@ -34,7 +34,9 @@
#include <inttypes.h>
#include <fcntl.h>
#include <unistd.h>
+#include <dlfcn.h>
#include "system.h"
+#include "dbgserver-client.h"
int
@@ -187,7 +189,31 @@ dwfl_build_id_find_elf (Dwfl_Module *mod,
free (*file_name);
*file_name = NULL;
}
- else if (errno == 0 && mod->build_id_len > 0)
+#if ENABLE_DBGSERVER
+ else {
+ static void *dbgserver_so;
+ static __typeof__ (dbgserver_find_executable) *fp_dbgserver_find_executable;
+
+ if (dbgserver_so == NULL)
+ dbgserver_so = dlopen("libdbgserver-" VERSION ".so", RTLD_LAZY);
+ if (dbgserver_so == NULL)
+ dbgserver_so = dlopen("libdbgserver.so", RTLD_LAZY);
+ if (dbgserver_so != NULL && fp_dbgserver_find_executable == NULL)
+ fp_dbgserver_find_executable = dlsym (dbgserver_so, "dbgserver_find_executable");
+
+ if (fp_dbgserver_find_executable != NULL)
+ {
+ /* If all else fails and a build-id is available, query the
+ debuginfo-server if enabled. */
+ if (fd < 0 && mod->build_id_len > 0)
+ fd = (*fp_dbgserver_find_executable) (mod->build_id_bits,
+ mod->build_id_len,
+ NULL);
+ }
+ }
+#endif /* ENABLE_DBGSERVER */
+
+ if (fd < 0 && errno == 0 && mod->build_id_len > 0)
/* Setting this with no file yet loaded is a marker that
the build ID is authoritative even if we also know a
putative *FILE_NAME. */
diff --git a/libdwfl/find-debuginfo.c b/libdwfl/find-debuginfo.c
index 9267788d..f73976e1 100644
--- a/libdwfl/find-debuginfo.c
+++ b/libdwfl/find-debuginfo.c
@@ -31,9 +31,13 @@
#endif
#include "libdwflP.h"
+#ifdef ENABLE_DBGSERVER
+#include "dbgserver-client.h"
+#endif
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
+#include <dlfcn.h>
#include <sys/stat.h>
#include "system.h"
@@ -359,7 +363,8 @@ dwfl_standard_find_debuginfo (Dwfl_Module *mod,
other than just by finding nothing, that's all we do. */
const unsigned char *bits;
GElf_Addr vaddr;
- if (INTUSE(dwfl_module_build_id) (mod, &bits, &vaddr) > 0)
+ int bits_len;
+ if ((bits_len = INTUSE(dwfl_module_build_id) (mod, &bits, &vaddr)) > 0)
{
/* Dropping most arguments means we cannot rely on them in
dwfl_build_id_find_debuginfo. But leave it that way since
@@ -397,6 +402,28 @@ dwfl_standard_find_debuginfo (Dwfl_Module *mod,
free (canon);
}
+#if ENABLE_DBGSERVER
+ {
+ static void *dbgserver_so;
+ static __typeof__ (dbgserver_find_debuginfo) *fp_dbgserver_find_debuginfo;
+
+ if (dbgserver_so == NULL)
+ dbgserver_so = dlopen("libdbgserver-" VERSION ".so", RTLD_LAZY);
+ if (dbgserver_so == NULL)
+ dbgserver_so = dlopen("libdbgserver.so", RTLD_LAZY);
+ if (dbgserver_so != NULL && fp_dbgserver_find_debuginfo == NULL)
+ fp_dbgserver_find_debuginfo = dlsym (dbgserver_so, "dbgserver_find_debuginfo");
+
+ if (fp_dbgserver_find_debuginfo != NULL)
+ {
+ /* If all else fails and a build-id is available, query the
+ debuginfo-server if enabled. */
+ if (fd < 0 && bits_len > 0)
+ fd = (*fp_dbgserver_find_debuginfo) (bits, bits_len, NULL);
+ }
+ }
+#endif /* ENABLE_DBGSERVER */
+
return fd;
}
INTDEF (dwfl_standard_find_debuginfo)
diff --git a/m4/ax_check_compile_flag.m4 b/m4/ax_check_compile_flag.m4
new file mode 100644
index 00000000..ca363971
--- /dev/null
+++ b/m4/ax_check_compile_flag.m4
@@ -0,0 +1,74 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT])
+#
+# DESCRIPTION
+#
+# Check whether the given FLAG works with the current language's compiler
+# or gives an error. (Warnings, however, are ignored)
+#
+# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on
+# success/failure.
+#
+# If EXTRA-FLAGS is defined, it is added to the current language's default
+# flags (e.g. CFLAGS) when the check is done. The check is thus made with
+# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to
+# force the compiler to issue an error when a bad flag is given.
+#
+# INPUT gives an alternative input source to AC_COMPILE_IFELSE.
+#
+# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this
+# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Guido U. Draheim <guidod@gmx.de>
+# Copyright (c) 2011 Maarten Bosmans <mkbosmans@gmail.com>
+#
+# This program is free software: you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+
+#serial 4
+
+AC_DEFUN([AX_CHECK_COMPILE_FLAG],
+[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF
+AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl
+AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [
+ ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS
+ _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1"
+ AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])],
+ [AS_VAR_SET(CACHEVAR,[yes])],
+ [AS_VAR_SET(CACHEVAR,[no])])
+ _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags])
+AS_VAR_IF(CACHEVAR,yes,
+ [m4_default([$2], :)],
+ [m4_default([$3], :)])
+AS_VAR_POPDEF([CACHEVAR])dnl
+])dnl AX_CHECK_COMPILE_FLAGS
diff --git a/m4/ax_cxx_compile_stdcxx.m4 b/m4/ax_cxx_compile_stdcxx.m4
new file mode 100644
index 00000000..8adc7656
--- /dev/null
+++ b/m4/ax_cxx_compile_stdcxx.m4
@@ -0,0 +1,556 @@
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
+#
+# DESCRIPTION
+#
+# Check for baseline language coverage in the compiler for the specified
+# version of the C++ standard. If necessary, add switches to CXX to
+# enable support. VERSION may be '11' (for the C++11 standard) or '14'
+# (for the C++14 standard).
+#
+# The second argument, if specified, indicates whether you insist on an
+# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
+# -std=c++11). If neither is specified, you get whatever works, with
+# preference for an extended mode.
+#
+# The third argument, if specified 'mandatory' or if left unspecified,
+# indicates that baseline support for the specified C++ standard is
+# required and that the macro should error out if no mode with that
+# support is found. If specified 'optional', then configuration proceeds
+# regardless, after defining HAVE_CXX${VERSION} if and only if a
+# supporting mode is found.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
+# Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
+# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
+# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
+# Copyright (c) 2015 Paul Norman <penorman@mac.com>
+# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 3
+
+dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
+dnl (serial version number 13).
+
+AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
+ m4_if([$1], [11], [],
+ [$1], [14], [],
+ [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])],
+ [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$2], [], [],
+ [$2], [ext], [],
+ [$2], [noext], [],
+ [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [optional], [ax_cxx_compile_cxx$1_required=false],
+ [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
+ AC_LANG_PUSH([C++])dnl
+ ac_success=no
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
+ ax_cv_cxx_compile_cxx$1,
+ [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [ax_cv_cxx_compile_cxx$1=yes],
+ [ax_cv_cxx_compile_cxx$1=no])])
+ if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
+ ac_success=yes
+ fi
+
+ m4_if([$2], [noext], [], [dnl
+ if test x$ac_success = xno; then
+ for switch in -std=gnu++$1 -std=gnu++0x; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+
+ m4_if([$2], [ext], [], [dnl
+ if test x$ac_success = xno; then
+ dnl HP's aCC needs +std=c++11 according to:
+ dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
+ dnl Cray's crayCC needs "-h std=c++11"
+ for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+ AC_LANG_POP([C++])
+ if test x$ax_cxx_compile_cxx$1_required = xtrue; then
+ if test x$ac_success = xno; then
+ AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
+ fi
+ fi
+ if test x$ac_success = xno; then
+ HAVE_CXX$1=0
+ AC_MSG_NOTICE([No compiler with C++$1 support was found])
+ else
+ HAVE_CXX$1=1
+ AC_DEFINE(HAVE_CXX$1,1,
+ [define if the compiler supports basic C++$1 syntax])
+ fi
+ AC_SUBST(HAVE_CXX$1)
+])
+
+
+dnl Test body for checking C++11 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+)
+
+
+dnl Test body for checking C++14 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
+)
+
+
+dnl Tests for new features in C++11
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
+
+// If the compiler admits that it is not ready for C++11, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201103L
+
+#error "This is not a C++11 compiler"
+
+#else
+
+namespace cxx11
+{
+
+ namespace test_static_assert
+ {
+
+ template <typename T>
+ struct check
+ {
+ static_assert(sizeof(int) <= sizeof(T), "not big enough");
+ };
+
+ }
+
+ namespace test_final_override
+ {
+
+ struct Base
+ {
+ virtual void f() {}
+ };
+
+ struct Derived : public Base
+ {
+ virtual void f() override {}
+ };
+
+ }
+
+ namespace test_double_right_angle_brackets
+ {
+
+ template < typename T >
+ struct check {};
+
+ typedef check<void> single_type;
+ typedef check<check<void>> double_type;
+ typedef check<check<check<void>>> triple_type;
+ typedef check<check<check<check<void>>>> quadruple_type;
+
+ }
+
+ namespace test_decltype
+ {
+
+ int
+ f()
+ {
+ int a = 1;
+ decltype(a) b = 2;
+ return a + b;
+ }
+
+ }
+
+ namespace test_type_deduction
+ {
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static const bool value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static const bool value = true;
+ };
+
+ template < typename T1, typename T2 >
+ auto
+ add(T1 a1, T2 a2) -> decltype(a1 + a2)
+ {
+ return a1 + a2;
+ }
+
+ int
+ test(const int c, volatile int v)
+ {
+ static_assert(is_same<int, decltype(0)>::value == true, "");
+ static_assert(is_same<int, decltype(c)>::value == false, "");
+ static_assert(is_same<int, decltype(v)>::value == false, "");
+ auto ac = c;
+ auto av = v;
+ auto sumi = ac + av + 'x';
+ auto sumf = ac + av + 1.0;
+ static_assert(is_same<int, decltype(ac)>::value == true, "");
+ static_assert(is_same<int, decltype(av)>::value == true, "");
+ static_assert(is_same<int, decltype(sumi)>::value == true, "");
+ static_assert(is_same<int, decltype(sumf)>::value == false, "");
+ static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
+ return (sumf > 0.0) ? sumi : add(c, v);
+ }
+
+ }
+
+ namespace test_noexcept
+ {
+
+ int f() { return 0; }
+ int g() noexcept { return 0; }
+
+ static_assert(noexcept(f()) == false, "");
+ static_assert(noexcept(g()) == true, "");
+
+ }
+
+ namespace test_constexpr
+ {
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
+ {
+ return *s ? strlen_c_r(s + 1, acc + 1) : acc;
+ }
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c(const CharT *const s) noexcept
+ {
+ return strlen_c_r(s, 0UL);
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("1") == 1UL, "");
+ static_assert(strlen_c("example") == 7UL, "");
+ static_assert(strlen_c("another\0example") == 7UL, "");
+
+ }
+
+ namespace test_rvalue_references
+ {
+
+ template < int N >
+ struct answer
+ {
+ static constexpr int value = N;
+ };
+
+ answer<1> f(int&) { return answer<1>(); }
+ answer<2> f(const int&) { return answer<2>(); }
+ answer<3> f(int&&) { return answer<3>(); }
+
+ void
+ test()
+ {
+ int i = 0;
+ const int c = 0;
+ static_assert(decltype(f(i))::value == 1, "");
+ static_assert(decltype(f(c))::value == 2, "");
+ static_assert(decltype(f(0))::value == 3, "");
+ }
+
+ }
+
+ namespace test_uniform_initialization
+ {
+
+ struct test
+ {
+ static const int zero {};
+ static const int one {1};
+ };
+
+ static_assert(test::zero == 0, "");
+ static_assert(test::one == 1, "");
+
+ }
+
+ namespace test_lambdas
+ {
+
+ void
+ test1()
+ {
+ auto lambda1 = [](){};
+ auto lambda2 = lambda1;
+ lambda1();
+ lambda2();
+ }
+
+ int
+ test2()
+ {
+ auto a = [](int i, int j){ return i + j; }(1, 2);
+ auto b = []() -> int { return '0'; }();
+ auto c = [=](){ return a + b; }();
+ auto d = [&](){ return c; }();
+ auto e = [a, &b](int x) mutable {
+ const auto identity = [](int y){ return y; };
+ for (auto i = 0; i < a; ++i)
+ a += b--;
+ return x + identity(a + b);
+ }(0);
+ return a + b + c + d + e;
+ }
+
+ int
+ test3()
+ {
+ const auto nullary = [](){ return 0; };
+ const auto unary = [](int x){ return x; };
+ using nullary_t = decltype(nullary);
+ using unary_t = decltype(unary);
+ const auto higher1st = [](nullary_t f){ return f(); };
+ const auto higher2nd = [unary](nullary_t f1){
+ return [unary, f1](unary_t f2){ return f2(unary(f1())); };
+ };
+ return higher1st(nullary) + higher2nd(nullary)(unary);
+ }
+
+ }
+
+ namespace test_variadic_templates
+ {
+
+ template <int...>
+ struct sum;
+
+ template <int N0, int... N1toN>
+ struct sum<N0, N1toN...>
+ {
+ static constexpr auto value = N0 + sum<N1toN...>::value;
+ };
+
+ template <>
+ struct sum<>
+ {
+ static constexpr auto value = 0;
+ };
+
+ static_assert(sum<>::value == 0, "");
+ static_assert(sum<1>::value == 1, "");
+ static_assert(sum<23>::value == 23, "");
+ static_assert(sum<1, 2>::value == 3, "");
+ static_assert(sum<5, 5, 11>::value == 21, "");
+ static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
+
+ }
+
+ // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
+ // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
+ // because of this.
+ namespace test_template_alias_sfinae
+ {
+
+ struct foo {};
+
+ template<typename T>
+ using member = typename T::member_type;
+
+ template<typename T>
+ void func(...) {}
+
+ template<typename T>
+ void func(member<T>*) {}
+
+ void test();
+
+ void test() { func<foo>(0); }
+
+ }
+
+} // namespace cxx11
+
+#endif // __cplusplus >= 201103L
+
+]])
+
+
+dnl Tests for new features in C++14
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
+
+// If the compiler admits that it is not ready for C++14, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201402L
+
+#error "This is not a C++14 compiler"
+
+#else
+
+namespace cxx14
+{
+
+ namespace test_polymorphic_lambdas
+ {
+
+ int
+ test()
+ {
+ const auto lambda = [](auto&&... args){
+ const auto istiny = [](auto x){
+ return (sizeof(x) == 1UL) ? 1 : 0;
+ };
+ const int aretiny[] = { istiny(args)... };
+ return aretiny[0];
+ };
+ return lambda(1, 1L, 1.0f, '1');
+ }
+
+ }
+
+ namespace test_binary_literals
+ {
+
+ constexpr auto ivii = 0b0000000000101010;
+ static_assert(ivii == 42, "wrong value");
+
+ }
+
+ namespace test_generalized_constexpr
+ {
+
+ template < typename CharT >
+ constexpr unsigned long
+ strlen_c(const CharT *const s) noexcept
+ {
+ auto length = 0UL;
+ for (auto p = s; *p; ++p)
+ ++length;
+ return length;
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("x") == 1UL, "");
+ static_assert(strlen_c("test") == 4UL, "");
+ static_assert(strlen_c("another\0test") == 7UL, "");
+
+ }
+
+ namespace test_lambda_init_capture
+ {
+
+ int
+ test()
+ {
+ auto x = 0;
+ const auto lambda1 = [a = x](int b){ return a + b; };
+ const auto lambda2 = [a = lambda1(x)](){ return a; };
+ return lambda2();
+ }
+
+ }
+
+ namespace test_digit_seperators
+ {
+
+ constexpr auto ten_million = 100'000'000;
+ static_assert(ten_million == 100000000, "");
+
+ }
+
+ namespace test_return_type_deduction
+ {
+
+ auto f(int& x) { return x; }
+ decltype(auto) g(int& x) { return x; }
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static constexpr auto value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static constexpr auto value = true;
+ };
+
+ int
+ test()
+ {
+ auto x = 0;
+ static_assert(is_same<int, decltype(f(x))>::value, "");
+ static_assert(is_same<int&, decltype(g(x))>::value, "");
+ return x;
+ }
+
+ }
+
+} // namespace cxx14
+
+#endif // __cplusplus >= 201402L
+
+]])
diff --git a/src/Makefile.am b/src/Makefile.am
index ab72a0e9..5c6dcc21 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -86,6 +86,7 @@ stack_LDADD = $(libebl) $(libelf) $(libdw) $(libeu) $(argp_LDADD) $(demanglelib)
elfcompress_LDADD = $(libebl) $(libelf) $(libdw) $(libeu) $(argp_LDADD)
elfclassify_LDADD = $(libelf) $(libdw) $(libeu) $(argp_LDADD)
+
installcheck-binPROGRAMS: $(bin_PROGRAMS)
bad=0; pid=$$$$; list="$(bin_PROGRAMS)"; for p in $$list; do \
case ' $(AM_INSTALLCHECK_STD_OPTIONS_EXEMPT) ' in \
diff --git a/tests/Makefile.am b/tests/Makefile.am
index f12e48f8..c47e3644 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -60,6 +60,7 @@ check_PROGRAMS = arextract arsymtest newfile saridx scnnames sectiondump \
fillfile dwarf_default_lower_bound dwarf-die-addr-die \
get-units-invalid get-units-split attr-integrate-skel \
all-dwarf-ranges unit-info next_cfi \
+ dbgserver_build_id_find \
elfcopy addsections xlate_notes elfrdwrnop \
dwelf_elf_e_machine_string
@@ -162,6 +163,7 @@ TESTS = run-arextract.sh run-arsymtest.sh run-ar.sh newfile test-nlist \
run-reverse-sections.sh run-reverse-sections-self.sh \
run-copyadd-sections.sh run-copymany-sections.sh \
run-typeiter-many.sh run-strip-test-many.sh \
+ run-dbgserver-find.sh \
run-strip-version.sh run-xlate-note.sh \
run-readelf-discr.sh \
run-dwelf_elf_e_machine_string.sh \
@@ -434,6 +436,8 @@ EXTRA_DIST = run-arextract.sh run-arsymtest.sh run-ar.sh \
testfile-debug-rel-ppc64.o.bz2 \
run-strip-version.sh testfile-version.bz2 \
run-xlate-note.sh \
+ run-dbgserver-find.sh testfile-dbgserver.debug.bz2 \
+ testfile-dbgserver.exec.bz2 \
run-readelf-discr.sh \
testfile-rng.debug.bz2 testfile-urng.debug.bz2 \
run-dwelf_elf_e_machine_string.sh \
@@ -471,7 +475,7 @@ TESTS_ENVIRONMENT = LC_ALL=C; LANG=C; VALGRIND_CMD=$(valgrind_cmd); \
export LC_ALL; export LANG; export VALGRIND_CMD; \
NM=$(NM); export NM;
LOG_COMPILER = $(abs_srcdir)/test-wrapper.sh \
- $(abs_top_builddir)/libdw:$(abs_top_builddir)/backends:$(abs_top_builddir)/libelf:$(abs_top_builddir)/libasm
+ $(abs_top_builddir)/libdw:$(abs_top_builddir)/backends:$(abs_top_builddir)/libelf:$(abs_top_builddir)/libasm:$(abs_top_builddir)/dbgserver
installcheck-local:
$(MAKE) $(AM_MAKEFLAGS) \
@@ -607,6 +611,7 @@ unit_info_LDADD = $(libdw)
next_cfi_LDADD = $(libelf) $(libdw)
elfcopy_LDADD = $(libelf)
addsections_LDADD = $(libelf)
+dbgserver_build_id_find_LDADD = $(libdw)
xlate_notes_LDADD = $(libelf)
elfrdwrnop_LDADD = $(libelf)
dwelf_elf_e_machine_string_LDADD = $(libelf) $(libdw)
diff --git a/tests/dbgserver_build_id_find.c b/tests/dbgserver_build_id_find.c
new file mode 100644
index 00000000..8e302c8e
--- /dev/null
+++ b/tests/dbgserver_build_id_find.c
@@ -0,0 +1,60 @@
+/* Test program for fetching debuginfo with debuginfo-server.
+ Copyright (C) 2019 Red Hat, Inc.
+ This file is part of elfutils.
+
+ This file is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ elfutils is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+#include <stdio.h>
+#include ELFUTILS_HEADER(dwfl)
+#include <elf.h>
+#include <dwarf.h>
+#include <argp.h>
+#include <assert.h>
+#include <string.h>
+
+static const char *debuginfo_path = "";
+static const Dwfl_Callbacks cb =
+ {
+ NULL,
+ dwfl_standard_find_debuginfo,
+ NULL,
+ (char **)&debuginfo_path,
+ };
+
+int
+main (int argc __attribute__ ((unused)), char **argv)
+{
+  int expect_pass = strcmp(argv[3], "0"); /* argv[3] == "0" means a failure is expected.  */
+  Dwarf_Addr bias = 0;
+  Dwfl *dwfl = dwfl_begin(&cb);
+  dwfl_report_begin(dwfl);
+
+  /* Open an executable.  */
+  Dwfl_Module *mod = dwfl_report_offline(dwfl, argv[2], argv[2], -1);
+
+  /* The corresponding debuginfo will not be found in debuginfo_path
+     (since it's empty), causing the server to be queried.  */
+  Dwarf *res = dwfl_module_getdwarf(mod, &bias);
+  if (expect_pass)
+    assert(res);
+  else
+    assert(!res);
+
+  dwfl_end(dwfl); /* Release modules and close any descriptors we hold.  */
+  return 0;
+}
diff --git a/tests/run-dbgserver-find.sh b/tests/run-dbgserver-find.sh
new file mode 100755
index 00000000..bd3a75a6
--- /dev/null
+++ b/tests/run-dbgserver-find.sh
@@ -0,0 +1,59 @@
+# Copyright (C) 2019 Red Hat, Inc.
+# This file is part of elfutils.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# elfutils is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+. $srcdir/test-subr.sh
+
+testfiles testfile-dbgserver.exec
+testfiles testfile-dbgserver.debug
+
+EXPECT_FAIL=0
+EXPECT_PASS=1
+DB=${PWD}/.dbgserver_tmp.sqlite
+export DBGSERVER_CACHE_PATH=${PWD}/.client_cache
+
+# find an unused port number
+while true; do
+  PORT=`expr '(' $RANDOM % 1000 ')' + 9000`
+  ss -atn | fgrep ":$PORT" || break
+done
+
+../../dbgserver/dbgserver -vvv -d $DB -F $PWD -p $PORT &
+PID=$!
+# Clean up on exit and on the catchable termination signals
+# (EXIT HUP INT TERM).  SIGKILL cannot be trapped, so it is not listed.
+trap 'kill $PID 2>/dev/null || true; rm -f $DB' 0 1 2 15
+sleep 5 # XXX: poll the server instead of waiting a fixed interval
+
+export DBGSERVER_URLS=http://localhost:$PORT # XXX: no / at end; dbgserver rejects extra /
+
+# Test whether the server is able to fetch the file from the local dbgserver.
+testrun ${abs_builddir}/dbgserver_build_id_find -e testfile-dbgserver.exec $EXPECT_PASS
+
+kill $PID
+wait $PID 2>/dev/null || true # make sure the server has really gone away
+rm -f $DB
+
+# Run the test again without the server running. The target file should
+# be found in the cache.
+testrun ${abs_builddir}/dbgserver_build_id_find -e testfile-dbgserver.exec $EXPECT_PASS
+
+# Trigger a cache clean and run the test again. The client should be unable to
+# find the target.
+echo 0 > $DBGSERVER_CACHE_PATH/cache_clean_interval_s
+testrun ${abs_builddir}/dbgserver_build_id_find -e testfile-dbgserver.exec $EXPECT_FAIL
+
+rm -rf $DBGSERVER_CACHE_PATH
+exit 0
diff --git a/tests/testfile-dbgserver.debug.bz2 b/tests/testfile-dbgserver.debug.bz2
new file mode 100644
index 00000000..d830eb94
--- /dev/null
+++ b/tests/testfile-dbgserver.debug.bz2
Binary files differ
diff --git a/tests/testfile-dbgserver.exec.bz2 b/tests/testfile-dbgserver.exec.bz2
new file mode 100755
index 00000000..8ae54c11
--- /dev/null
+++ b/tests/testfile-dbgserver.exec.bz2
Binary files differ