diff --git a/.gitignore b/.gitignore index c44eaa58a..2f88451d8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ # out-of-tree build +compile build/ # autoconf m4/libtool.m4 @@ -25,6 +26,7 @@ Makefile stamp-h1 client/icecc client/icecc-create-env +client/icecc-test-env client/libclient.a compilerwrapper/compilerwrapper config.h @@ -41,7 +43,9 @@ suse/icecream.spec doc/*.1 doc/*.7 doc/index.html +tests/results tests/test-suite.log tests/testargs tests/testargs.log tests/testargs.trs +tests/test-setup.sh diff --git a/.travis.yml b/.travis.yml index 5ccd2022d..ff4080ed3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,29 @@ script: - ./autogen.sh - ./configure --prefix=$PWD/_inst - make - - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make test; fi + - | + if test "$TRAVIS_OS_NAME" = "linux"; then + strict="-strict" + if test -n "$VALGRIND"; then + # See tests/README. + sudo /sbin/setcap cap_sys_chroot+ep /usr/lib/valgrind/memcheck-amd64-linux + fi + make test${strict} VALGRIND=$VALGRIND TESTCC=/usr/bin/gcc TESTCXX=/usr/bin/g++ + if test $? -ne 0; then + exit 1 + fi + # Only the clang(-3.4) package provides /usr/bin/clang, but this path is hardcoded in icecream. + # So for now make icecream use the newer version provided by Travis. + sudo ln -s `which clang` /usr/bin/clang + sudo ln -s `which clang++` /usr/bin/clang++ + make test${strict} VALGRIND=$VALGRIND TESTCC=/usr/bin/clang TESTCXX=/usr/bin/clang++ + elif test "$TRAVIS_OS_NAME" = "osx"; then + if test -n "$STRICTTESTS"; then + strict="-strict" + fi + make test${strict} TESTCC=clang TESTCXX=clang++ + fi + make dist env: # important, even though empty! @@ -16,25 +38,33 @@ matrix: sudo: true # for setcap so we can run the tests in chroot. compiler: clang dist: trusty + - os: osx + before_install: + - brew update + - brew install lzo docbook2x gdb ccache + - os: linux + sudo: true # for setcap so we can run the tests in chroot. + compiler: clang + env: VALGRIND=1 + dist: trusty - os: linux sudo: true # for setcap so we can run the tests in chroot. compiler: clang env: BUILD_TYPE=asan dist: trusty + # Sanitizer builds with newer travis fail for unknown reason without giving any message. + group: deprecated-2017Q4 - os: linux sudo: true # for setcap so we can run the tests in chroot. compiler: clang env: BUILD_TYPE=lsan dist: trusty + group: deprecated-2017Q4 - os: linux sudo: true # for setcap so we can run the tests in chroot. 
compiler: clang env: BUILD_TYPE=ubsan dist: trusty - - os: osx - before_install: - - brew update - - brew install lzo docbook2x - compiler: gcc env: BUILD_TYPE=cmake script: @@ -51,15 +81,13 @@ matrix: - make -j 4 VERBOSE=1 - echo "tests not yet attempted" allow_failures: - - compiler: clang - env: BUILD_TYPE=lsan - compiler: gcc env: BUILD_TYPE=cmake before_script: - | if [ "$BUILD_TYPE" == "asan" ]; then - export SAN_FLAGS="-fsanitize=address -fno-omit-frame-pointer" + export SAN_FLAGS="-fsanitize=address -fsanitize-address-use-after-scope -fno-omit-frame-pointer" fi - | if [ "$BUILD_TYPE" == "lsan" ]; then @@ -84,9 +112,11 @@ before_script: addons: apt: packages: - - clang + - gcc - libcap-ng-dev - libcap-ng-utils - liblzo2-dev - docbook2x - realpath + - gdb + - valgrind diff --git a/Makefile.am b/Makefile.am index 316893602..ed773680f 100644 --- a/Makefile.am +++ b/Makefile.am @@ -17,3 +17,5 @@ dist-hook: test: install $(MAKE) -C tests $@ +test-strict: install + $(MAKE) -C tests $@ diff --git a/NEWS b/NEWS index 0ac8984ef..b7844fdf0 100644 --- a/NEWS +++ b/NEWS @@ -1,5 +1,4 @@ 1.2 (in progress) -C++11 is required 1.1 - revert "Add load control for preprocessing" diff --git a/README b/README index b42e2c1f1..2c18a5207 100644 --- a/README +++ b/README @@ -6,8 +6,6 @@ form of process accounting in there. How to install icecream ======================= -You must have a compiler that supports at least C++11. - cd icecream ./autogen.sh ./configure --prefix=/opt/icecream diff --git a/README.md b/README.md index bea1214dc..d6cdd24c6 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ Table of Contents - [osc build](#osc-build) - [some compilation node aren't used](#some-compilation-node-arent-used) - - [build with -Werror fails when using icecream ](#build-with--werror-fails-when-using-icecream) + - [build with -Werror fails only when using icecream ](#build-with--werror-fails-only-when-using-icecream) - [clang 4.0 tries to read /proc/cpuinfo and fails](#clang-tries-to-read-proccpuinfo-and-fails) - [Supported platforms](#supported-platforms) @@ -57,8 +57,8 @@ We recommend that you use packages maintained by your distribution if possible. Your distribution should provide customized startup scripts that make icecream fit better into the way your system is configured. -We highly recommend you install [icemon](https://github.com/icecc/icemon) with -icecream. +We highly recommend you install [icemon](https://github.com/icecc/icemon) or +[icecream-sundae](https://github.com/JPEWdev/icecream-sundae) with icecream. If you want to install from source see the instructions in the README file provided in the source package. @@ -111,8 +111,7 @@ simple configuration change) ### make scheduler persistent: -By adding an option --scheduler-host for daemon and --persistent-client-connection for scheduler -,the client connections are not disconnected from the scheduler even there is an availability of better scheduler. +By adding an option --scheduler-host for daemon and --persistent-client-connection for scheduler, the client connections are not disconnected from the scheduler even there is an availability of better scheduler. TroubleShooting ------------------------------------------------------------------------------- @@ -178,45 +177,15 @@ being used at all for compilation, check you have the same icecream version on all nodes, otherwise, nodes running older icecream version might be excluded from available nodes. 
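If you need to verify this across your build farm, a quick check along the following lines can help. This is only a minimal sketch: the host names are placeholders, it assumes ssh access to every node, and it assumes `icecc --version` prints the installed client version.

```
# Minimal sketch: compare the icecream client version on each build node.
# node1..node3 are placeholder host names; adjust to your daemon machines.
for host in node1 node2 node3; do
    printf '%s: ' "$host"
    ssh "$host" icecc --version
done
```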
-The icecream version shipped with openSUSE 12.2 is partially incompatible -with nodes using other icecream versions. 12.2 nodes will not be used for compilation -by other nodes, and depending on the scheduler version 12.2 nodes will not compile -on other nodes either. These incompatible nodes can be identified by having -'Linux3_' prefix in the platform). Replace the openSUSE 12.2 package -with a different one (for example from the devel:tools:build repository). +### build with -Werror fails only when using icecream -### build with -Werror fails when using icecream - -This happens with gcc when `-Werror` is used and preprocessor generates code that issues -warning. For example this code (taken from ltrace project): - - assert(info->type != info->type); - -When building locally, gcc performs preprocessing and compilation in one step -and ignores this warning (see gcc -[bugzilla](https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80369)). But icecream splits -preprocessing (done locally) and compilation (done remotely), which makes gcc trigger -a warning message and compilation fails (because of `-Werror`). - -There is no known workaround, either disable `-Werror` or fix the code. +This problem should not exist with a recent icecream version. If it does, try +using `ICECC_REMOTE_CPP=1` (see `icecc --help`). ### clang tries to read /proc/cpuinfo and fails -This is a bug in clang 4.0. https://bugs.llvm.org/show_bug.cgi?id=33008 -It should be fixed in the future, but if you have a broken release you can work around this by -creating a custom environment and adding /proc/cpuinfo to it. - -``` -/usr/lib/icecc/icecc-create-env --clang /usr/bin/clang /usr/lib/icecc/compilerwrapper --addfile /proc/cpuinfo -``` - -Do not apply this work around if you do not need it. /proc/cpuinfo is machine specific so and this work -around will place wrong information in it. In the case of the bug in clang 4.0 this file is checked for -existence but the contents are not actually used, but it is possible future versions of clang/gcc will use -this file if it exists for something else. - -see [Using icecream in heterogeneous environments](#using-icecream-in-heterogeneous-environments) -for more information on using icecc-create-env. +This is a problem of clang 4.0 and newer: https://bugs.llvm.org/show_bug.cgi?id=33008 +The most recent Icecream version works around this problem. Supported platforms --------------------------------------------------------------------------------------- @@ -233,6 +202,11 @@ tricky parts. Supported are: Note that all these platforms can be used both as server and as client - meaning you can do full cross compiling between them. +The following platforms are known to work at least as a client, meaning that +you can run compilation on them that will compile on remote nodes using cross compilation. + + - Cygwin + Using icecream in heterogeneous environments ----------------------------------------------------------------------------------------------------------------------------------------- @@ -367,14 +341,11 @@ for the same host architecture: in the $PATH and before the path of the toolchains. - Create a tarball file for each toolchain that you want to use with - icecream. The /usr/lib/icecc/icecc-create-env script can be used to + icecream. 
icecc-create-env script can be used to create the tarball file for each toolchain, for example: - /usr/lib/icecc/icecc-create-env --gcc /work/toolchain1/bin/arm-eabi-gcc - /work/toolchain1/bin/arm-eabi-g++ - - /usr/lib/icecc/icecc-create-env --gcc /work/toolchain2/bin/arm-linux-androideabi-gcc - /work/toolchain2/bin/arm-linux-androideabi-gcc + icecc-create-env /work/toolchain1/bin/arm-eabi-gcc + icecc-create-env /work/toolchain2/bin/arm-linux-androideabi-gcc - Set ICECC\_VERSION to point to the native tarball file and for each tarball file created to the toolchains (e.g ICECC\_VERSION=/work/i386-native.tar.gz,/work/arm-eabi-toolchain1.tar.gz=arm-eabi,/work/arm-linux-androideabi-toolchain2.tar.gz=arm-linux-androideabi). @@ -583,7 +554,7 @@ Icecream on gentoo compiler errors, if not all computers have the same processor type/version -**Be aware** that you have to change the CFLAGS during ich gcc update +**Be aware** that you have to change the CFLAGS during each gcc update too. - To use icecream with emerge/ebuild use PREROOTPATH=/opt/icecream/lib/icecc/bin diff --git a/autogen.sh b/autogen.sh index 6565ea189..79db79065 100755 --- a/autogen.sh +++ b/autogen.sh @@ -3,10 +3,9 @@ TESTLIBTOOLIZE="glibtoolize libtoolize" LIBTOOLIZEFOUND="0" -srcdir=`dirname $0` +srcdir=$(dirname $0) test -z "$srcdir" && srcdir=. -olddir=`pwd` cd $srcdir aclocal --version > /dev/null 2> /dev/null || { diff --git a/client/Makefile.am b/client/Makefile.am index 41054ff63..b66c3ed88 100644 --- a/client/Makefile.am +++ b/client/Makefile.am @@ -1,9 +1,10 @@ bin_PROGRAMS = icecc -bin_SCRIPTS = icecc-create-env +bin_SCRIPTS = icecc-create-env icecc-test-env noinst_LIBRARIES = libclient.a libclient_a_SOURCES = \ arg.cpp \ + argv.c \ cpp.cpp \ local.cpp \ remote.cpp \ @@ -19,6 +20,7 @@ icecc_LDADD = \ $(LIBRSYNC) noinst_HEADERS = \ + argv.h \ client.h \ md5.h \ util.h diff --git a/client/arg.cpp b/client/arg.cpp index d45bcf323..82b26c61d 100644 --- a/client/arg.cpp +++ b/client/arg.cpp @@ -55,7 +55,23 @@ inline int str_startswith(const char *head, const char *worm) return !strncmp(head, worm, strlen(head)); } -static bool analyze_program(const char *name, CompileJob &job) +/* Some files should always be built locally... */ +static bool +should_always_build_locally(const string &filename) +{ + string p; + + p = find_basename(filename); + + if (str_startswith("conftest.", p.c_str()) + || str_startswith("tmp.conftest.", p.c_str())) { + return true; + } + + return false; +} + +static bool analyze_program(const char *name, CompileJob &job, bool& icerun) { string compiler_name = find_basename(name); @@ -79,7 +95,11 @@ static bool analyze_program(const char *name, CompileJob &job) bool vclang = compiler_name.find("clang-") != string::npos; bool vclangpp = compiler_name.find("clang++-") != string::npos; - if ((suffix == "++") || (suffix == "CC") || vgpp || vclangpp) { + if( icerun ) { + job.setLanguage(CompileJob::Lang_Custom); + log_info() << "icerun, running locally." << endl; + return true; + } else if ((suffix == "++") || (suffix == "CC") || vgpp || vclangpp) { job.setLanguage(CompileJob::Lang_CXX); } else if ((suffix == "cc") || vgcc || vclang) { job.setLanguage(CompileJob::Lang_C); @@ -88,6 +108,7 @@ static bool analyze_program(const char *name, CompileJob &job) } else { job.setLanguage(CompileJob::Lang_Custom); log_info() << "custom command, running locally." 
<< endl; + icerun = true; return true; } @@ -106,67 +127,105 @@ static bool is_argument_with_space(const char* argument) // -segcreate // -segprot // Move some arguments to Arg_Cpp or Arg_Local - if (str_equal("-dyld-prefix", argument) - || str_equal("-gcc-toolchain", argument) - || str_equal("--param", argument) - || str_equal("--sysroot", argument) - || str_equal("--system-header-prefix", argument) - || str_equal("-target", argument) - || str_equal("--assert", argument) - || str_equal("--allowable_client", argument) - || str_equal("-arch", argument) - || str_equal("-arch_only", argument) - || str_equal("-arcmt-migrate-report-output", argument) - || str_equal("--prefix", argument) - || str_equal("-bundle_loader", argument) - || str_equal("-dependency-dot", argument) - || str_equal("-dependency-file", argument) - || str_equal("-dylib_file", argument) - || str_equal("-exported_symbols_list", argument) - || str_equal("--bootclasspath", argument) - || str_equal("--CLASSPATH", argument) - || str_equal("--classpath", argument) - || str_equal("--resource", argument) - || str_equal("--encoding", argument) - || str_equal("--extdirs", argument) - || str_equal("-filelist", argument) - || str_equal("-fmodule-implementation-of", argument) - || str_equal("-fmodule-name", argument) - || str_equal("-fmodules-user-build-path", argument) - || str_equal("-fnew-alignment", argument) - || str_equal("-force_load", argument) - || str_equal("--output-class-directory", argument) - || str_equal("-framework", argument) - || str_equal("-frewrite-map-file", argument) - || str_equal("-ftrapv-handler", argument) - || str_equal("-image_base", argument) - || str_equal("-init", argument) - || str_equal("-install_name", argument) - || str_equal("-lazy_framework", argument) - || str_equal("-lazy_library", argument) - || str_equal("-meabi", argument) - || str_equal("-mhwdiv", argument) - || str_equal("-mllvm", argument) - || str_equal("-module-dependency-dir", argument) - || str_equal("-mthread-model", argument) - || str_equal("-multiply_defined", argument) - || str_equal("-multiply_defined_unused", argument) - || str_equal("-rpath", argument) - || str_equal("--rtlib", argument) - || str_equal("-seg_addr_table", argument) - || str_equal("-seg_addr_table_filename", argument) - || str_equal("-segs_read_only_addr", argument) - || str_equal("-segs_read_write_addr", argument) - || str_equal("-serialize-diagnostics", argument) - || str_equal("-std", argument) - || str_equal("--stdlib", argument) - || str_equal("--force-link", argument) - || str_equal("-umbrella", argument) - || str_equal("-unexported_symbols_list", argument) - || str_equal("-weak_library", argument) - || str_equal("-weak_reference_mismatches", argument)) { - - return true; + static const char* const arguments[] = { + "-dyld-prefix", + "-gcc-toolchain", + "--param", + "--sysroot", + "--system-header-prefix", + "-target", + "--assert", + "--allowable_client", + "-arch", + "-arch_only", + "-arcmt-migrate-report-output", + "--prefix", + "-bundle_loader", + "-dependency-dot", + "-dependency-file", + "-dylib_file", + "-exported_symbols_list", + "--bootclasspath", + "--CLASSPATH", + "--classpath", + "--resource", + "--encoding", + "--extdirs", + "-filelist", + "-fmodule-implementation-of", + "-fmodule-name", + "-fmodules-user-build-path", + "-fnew-alignment", + "-force_load", + "--output-class-directory", + "-framework", + "-frewrite-map-file", + "-ftrapv-handler", + "-image_base", + "-init", + "-install_name", + "-lazy_framework", + "-lazy_library", + "-meabi", + 
"-mhwdiv", + "-mllvm", + "-module-dependency-dir", + "-mthread-model", + "-multiply_defined", + "-multiply_defined_unused", + "-rpath", + "--rtlib", + "-seg_addr_table", + "-seg_addr_table_filename", + "-segs_read_only_addr", + "-segs_read_write_addr", + "-serialize-diagnostics", + "-std", + "--stdlib", + "--force-link", + "-umbrella", + "-unexported_symbols_list", + "-weak_library", + "-weak_reference_mismatches", + "-B", + "-D", + "-U", + "-I", + "-i", + "--include-directory", + "-L", + "-l", + "--library-directory", + "-MF", + "-MT", + "-MQ", + "-cxx-isystem", + "-c-isystem", + "-idirafter", + "--include-directory-after", + "-iframework", + "-iframeworkwithsysroot", + "-imacros", + "-imultilib", + "-iprefix", + "--include-prefix", + "-iquote", + "-isysroot", + "-isystem", + "-isystem-after", + "-ivfsoverlay", + "-iwithprefix", + "--include-with-prefix", + "--include-with-prefix-after", + "-iwithprefixbefore", + "--include-with-prefix-before", + "-iwithsysroot" + }; + + for( size_t i = 0; i < sizeof( arguments ) / sizeof( arguments[ 0 ] ); ++i ) { + if (str_equal( arguments[ i ], argument)) { + return true; + } } return false; @@ -189,24 +248,22 @@ bool analyse_argv(const char * const *argv, CompileJob &job, bool icerun, list 0); - bool always_local = analyze_program(had_cc ? job.compilerName().c_str() : argv[0], job); + bool always_local = analyze_program(had_cc ? job.compilerName().c_str() : argv[0], job, icerun); bool seen_c = false; bool seen_s = false; bool seen_mf = false; bool seen_md = false; bool seen_split_dwarf = false; + bool seen_target = false; + bool wunused_macros = false; + bool seen_arch = false; + bool seen_pedantic = false; // if rewriting includes and precompiling on remote machine, then cpp args are not local Argument_Type Arg_Cpp = compiler_only_rewrite_includes(job) ? Arg_Rest : Arg_Local; explicit_color_diagnostics = false; explicit_no_show_caret = false; - if (icerun) { - always_local = true; - job.setLanguage(CompileJob::Lang_Custom); - log_info() << "icerun, running locally." << endl; - } - for (int i = had_cc ? 2 : 1; argv[i]; i++) { const char *a = argv[i]; @@ -217,11 +274,15 @@ bool analyse_argv(const char * const *argv, CompileJob &job, bool icerun, listfirst); ifile = it->first; it = args.erase(it); + if (should_always_build_locally(ifile)) { + log_info() << "autoconf tests are run locally: " + << ifile << endl; + always_local = true; + } } else { log_info() << "found another non option on command line. Two input files? " << it->first << endl; @@ -651,10 +780,11 @@ bool analyse_argv(const char * const *argv, CompileJob &job, bool icerun, list + +/* Routines imported from standard C runtime libraries. */ + +#include +#include +#include +#include +#include +#include +#include + +#ifndef NULL +#define NULL 0 +#endif + +#ifndef EOS +#define EOS '\0' +#endif + +#define INITIAL_MAXARGC 8 /* Number of args + NULL in initial argv */ + + +/* + +@deftypefn Extension char** dupargv (char * const *@var{vector}) + +Duplicate an argument vector. Simply scans through @var{vector}, +duplicating each argument until the terminating @code{NULL} is found. +Returns a pointer to the argument vector if successful. Returns +@code{NULL} if there is insufficient memory to complete building the +argument vector. 
+ +@end deftypefn + +*/ + +static +char ** +dupargv (char * const *argv) +{ + int argc; + char **copy; + + if (argv == NULL) + return NULL; + + /* the vector */ + for (argc = 0; argv[argc] != NULL; argc++); + copy = (char **) malloc ((argc + 1) * sizeof (char *)); + + /* the strings */ + for (argc = 0; argv[argc] != NULL; argc++) + copy[argc] = strdup (argv[argc]); + copy[argc] = NULL; + return copy; +} + +/* + +@deftypefn Extension void freeargv (char **@var{vector}) + +Free an argument vector that was built using @code{buildargv}. Simply +scans through @var{vector}, freeing the memory for each argument until +the terminating @code{NULL} is found, and then frees @var{vector} +itself. + +@end deftypefn + +*/ + +void freeargv (char **vector) +{ + if (vector == NULL) + return; + + char **scan; + for (scan = vector; *scan != NULL; scan++) + free (*scan); + + free (vector); +} + +static void +consume_whitespace (const char **input) +{ + while (isspace (**input)) + { + (*input)++; + } +} + +static int +only_whitespace (const char* input) +{ + while (*input != EOS && isspace (*input)) + input++; + + return (*input == EOS); +} + +/* + +@deftypefn Extension char** buildargv (char *@var{sp}) + +Given a pointer to a string, parse the string extracting fields +separated by whitespace and optionally enclosed within either single +or double quotes (which are stripped off), and build a vector of +pointers to copies of the string for each field. The input string +remains unchanged. The last element of the vector is followed by a +@code{NULL} element. + +All of the memory for the pointer array and copies of the string +is obtained from @code{malloc}. All of the memory can be returned to the +system with the single function call @code{freeargv}, which takes the +returned result of @code{buildargv}, as it's argument. + +Returns a pointer to the argument vector if successful. Returns +@code{NULL} if @var{sp} is @code{NULL} or if there is insufficient +memory to complete building the argument vector. + +If the input is a null string (as opposed to a @code{NULL} pointer), +then buildarg returns an argument vector that has one arg, a null +string. + +@end deftypefn + +The memory for the argv array is dynamically expanded as necessary. + +In order to provide a working buffer for extracting arguments into, +with appropriate stripping of quotes and translation of backslash +sequences, we allocate a working buffer at least as long as the input +string. This ensures that we always have enough space in which to +work, since the extracted arg is never larger than the input string. + +The argument vector is always kept terminated with a @code{NULL} arg +pointer, so it can be passed to @code{freeargv} at any time, or +returned, as appropriate. + +*/ + +static char **buildargv (const char *input) +{ + if (input == NULL) + return NULL; + + char *copybuf; + int squote = 0; + int dquote = 0; + int bsquote = 0; + int argc = 0; + int maxargc = 0; + char **argv = NULL; + char **nargv; + + copybuf = (char *) malloc (strlen (input) + 1); + /* Is a do{}while to always execute the loop once. Always return an + argv, even for null strings. See NOTES above, test case below. 
*/ + do + { + /* Pick off argv[argc] */ + consume_whitespace (&input); + + if ((maxargc == 0) || (argc >= (maxargc - 1))) + { + /* argv needs initialization, or expansion */ + if (argv == NULL) + { + maxargc = INITIAL_MAXARGC; + nargv = (char **) malloc (maxargc * sizeof (char *)); + } + else + { + maxargc *= 2; + nargv = (char **) realloc (argv, maxargc * sizeof (char *)); + } + argv = nargv; + argv[argc] = NULL; + } + /* Begin scanning arg */ + char *arg = copybuf; + while (*input != EOS) + { + if (isspace (*input) && !squote && !dquote && !bsquote) + { + break; + } + else + { + if (bsquote) + { + bsquote = 0; + *arg++ = *input; + } + else if (*input == '\\') + { + bsquote = 1; + } + else if (squote) + { + if (*input == '\'') + { + squote = 0; + } + else + { + *arg++ = *input; + } + } + else if (dquote) + { + if (*input == '"') + { + dquote = 0; + } + else + { + *arg++ = *input; + } + } + else + { + if (*input == '\'') + { + squote = 1; + } + else if (*input == '"') + { + dquote = 1; + } + else + { + *arg++ = *input; + } + } + input++; + } + } + *arg = EOS; + argv[argc] = strdup (copybuf); + argc++; + argv[argc] = NULL; + + consume_whitespace (&input); + } + while (*input != EOS); + + free (copybuf); + return (argv); +} + +/* + +@deftypefn Extension void expandargv (int *@var{argcp}, char ***@var{argvp}) + +The @var{argcp} and @code{argvp} arguments are pointers to the usual +@code{argc} and @code{argv} arguments to @code{main}. This function +looks for arguments that begin with the character @samp{@@}. Any such +arguments are interpreted as ``response files''. The contents of the +response file are interpreted as additional command line options. In +particular, the file is separated into whitespace-separated strings; +each such string is taken as a command-line option. The new options +are inserted in place of the option naming the response file, and +@code{*argcp} and @code{*argvp} will be updated. If the value of +@code{*argvp} is modified by this function, then the new value has +been dynamically allocated and can be deallocated by the caller with +@code{freeargv}. However, most callers will simply call +@code{expandargv} near the beginning of @code{main} and allow the +operating system to free the memory when the program exits. + +@end deftypefn + +*/ + +void +expandargv (int *argcp, char ***argvp) +{ + /* The argument we are currently processing. */ + int i = 0; + /* To check if ***argvp has been dynamically allocated. */ + char ** const original_argv = *argvp; + /* Limit the number of response files that we parse in order + to prevent infinite recursion. */ + unsigned int iteration_limit = 2000; + /* Loop over the arguments, handling response files. We always skip + ARGVP[0], as that is the name of the program being run. */ + while (++i < *argcp) + { + /* The name of the response file. */ + const char *filename; + /* The response file. */ + FILE *f; + /* An upper bound on the number of characters in the response + file. */ + long pos; + /* The number of characters in the response file, when actually + read. */ + size_t len; + /* A dynamically allocated buffer used to hold options read from a + response file. */ + char *buffer; + /* Dynamically allocated storage for the options read from the + response file. */ + char **file_argv; + /* The number of options read from the response file, if any. */ + size_t file_argc; + struct stat sb; + /* We are only interested in options of the form "@file". 
*/ + filename = (*argvp)[i]; + if (filename[0] != '@') + continue; + /* If we have iterated too many times then stop. */ + if (-- iteration_limit == 0) + { + fprintf (stderr, "%s: error: too many @-files encountered\n", (*argvp)[0]); + exit (1); + } + if (stat (filename+1, &sb) < 0) + continue; + if (S_ISDIR(sb.st_mode)) + { + fprintf (stderr, "%s: error: @-file refers to a directory\n", (*argvp)[0]); + exit (1); + } + /* Read the contents of the file. */ + f = fopen (++filename, "r"); + if (!f) + continue; + if (fseek (f, 0L, SEEK_END) == -1) + goto error; + pos = ftell (f); + if (pos == -1) + goto error; + if (fseek (f, 0L, SEEK_SET) == -1) + goto error; + buffer = (char *) malloc (pos * sizeof (char) + 1); + len = fread (buffer, sizeof (char), pos, f); + if (len != (size_t) pos + /* On Windows, fread may return a value smaller than POS, + due to CR/LF->CR translation when reading text files. + That does not in-and-of itself indicate failure. */ + && ferror (f)) + goto error; + /* Add a NUL terminator. */ + buffer[len] = '\0'; + /* If the file is empty or contains only whitespace, buildargv would + return a single empty argument. In this context we want no arguments, + instead. */ + if (only_whitespace (buffer)) + { + file_argv = (char **) malloc (sizeof (char *)); + file_argv[0] = NULL; + } + else + /* Parse the string. */ + file_argv = buildargv (buffer); + /* If *ARGVP is not already dynamically allocated, copy it. */ + if (*argvp == original_argv) + *argvp = dupargv (*argvp); + /* Count the number of arguments. */ + file_argc = 0; + while (file_argv[file_argc]) + ++file_argc; + /* Free the original options memory. */ + free((*argvp)[i]); + /* Now, insert FILE_ARGV into ARGV. The "+1" below handles the + NULL terminator at the end of ARGV. */ + *argvp = ((char **) + realloc (*argvp, + (*argcp + file_argc + 1) * sizeof (char *))); + memmove (*argvp + i + file_argc, *argvp + i + 1, + (*argcp - i) * sizeof (char *)); + memcpy (*argvp + i, file_argv, file_argc * sizeof (char *)); + /* The original option has been replaced by all the new + options. */ + *argcp += file_argc - 1; + /* Free up memory allocated to process the response file. We do + not use freeargv because the individual options in FILE_ARGV + are now in the main ARGV. */ + free (file_argv); + free (buffer); + /* Rescan all of the arguments just read to support response + files that include other response files. */ + --i; + error: + /* We're all done with the file now. */ + fclose (f); + } +} diff --git a/client/argv.h b/client/argv.h new file mode 100644 index 000000000..ba6c6bfef --- /dev/null +++ b/client/argv.h @@ -0,0 +1,36 @@ +/* -*- mode: C++; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 99; -*- */ +/* vim: set ts=4 sw=4 et tw=99: */ +/* + * icecc -- A simple distributed compiler system + * + * Copyright (C) 2003, 2004 by the Icecream Authors + * + * based on distcc + * Copyright (C) 2002, 2003 by Martin Pool + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifdef __cplusplus +extern "C" +{ +#endif + +void expandargv (int *argcp, char ***argvp); +void freeargv(char **vector); + +#ifdef __cplusplus +} +#endif diff --git a/client/client.h b/client/client.h index 4e41b7e47..6dc37bf73 100644 --- a/client/client.h +++ b/client/client.h @@ -56,12 +56,24 @@ extern std::string find_compiler(const CompileJob &job); extern bool compiler_is_clang(const CompileJob &job); extern bool compiler_only_rewrite_includes(const CompileJob &job); extern std::string compiler_path_lookup(const std::string &compiler); +extern std::string clang_get_default_target(const CompileJob &job); /* In remote.cpp - permill is the probability it will be compiled three times */ extern int build_remote(CompileJob &job, MsgChannel *scheduler, const Environments &envs, int permill); /* safeguard.cpp */ -extern void dcc_increment_safeguard(void); +// We allow several recursions if icerun is involved, just in case icerun is e.g. used to invoke a script +// that calls make that invokes compilations. In this case, it is allowed to have icerun->icecc->compiler. +// However, icecc->icecc recursion is a problem, so just one recursion exceeds the limit. +// Also note that if the total number of such recursive invocations exceedds the number of allowed local +// jobs, iceccd will not assign another local job and the whole build will get stuck. +static const int SafeguardMaxLevel = 2; +enum SafeguardStep +{ + SafeguardStepCompiler = SafeguardMaxLevel, + SafeguardStepCustom = 1 +}; +extern void dcc_increment_safeguard(SafeguardStep step); extern int dcc_recursion_safeguard(void); extern Environments parse_icecc_version(const std::string &target, const std::string &prefix); diff --git a/client/cpp.cpp b/client/cpp.cpp index 5f74379e0..dabfb3ab4 100644 --- a/client/cpp.cpp +++ b/client/cpp.cpp @@ -123,7 +123,7 @@ pid_t call_cpp(CompileJob &job, int fdwrite, int fdread) if (it != flags.end()) { std::string p = (*it); - if (access(p.c_str(), R_OK) && !access((p + ".gch").c_str(), R_OK)) { + if (access(p.c_str(), R_OK) < 0 && access((p + ".gch").c_str(), R_OK) == 0) { list::iterator o = --it; it++; flags.erase(o); @@ -140,7 +140,7 @@ pid_t call_cpp(CompileJob &job, int fdwrite, int fdread) int argc = flags.size(); argc++; // the program argc += 2; // -E file.i - argc += 1; // -frewrite-includes + argc += 1; // -frewrite-includes / -fdirectives-only argv = new char*[argc + 1]; argv[0] = strdup(find_compiler(job).c_str()); int i = 1; @@ -153,21 +153,22 @@ pid_t call_cpp(CompileJob &job, int fdwrite, int fdread) argv[i++] = strdup(job.inputFile().c_str()); if (compiler_only_rewrite_includes(job)) { - argv[i++] = strdup("-frewrite-includes"); + if( compiler_is_clang(job)) { + argv[i++] = strdup("-frewrite-includes"); + } else { // gcc + argv[i++] = strdup("-fdirectives-only"); + } } argv[i++] = 0; } -#if 0 - printf("forking "); - - for (int index = 0; argv[index]; index++) { - printf("%s ", argv[index]); + string argstxt = argv[ 0 ]; + for( int i = 1; argv[ i ] != NULL; ++i ) { + argstxt += ' '; + argstxt += argv[ i ]; } - - printf("\n"); -#endif + trace() << "preparing source to send: " << argstxt << endl; if (fdwrite != STDOUT_FILENO) { /* Ignore failure */ @@ -176,8 +177,9 @@ pid_t call_cpp(CompileJob &job, int fdwrite, int fdread) close(fdwrite); } - 
dcc_increment_safeguard(); + dcc_increment_safeguard(SafeguardStepCompiler); execv(argv[0], argv); + int exitcode = ( errno == ENOENT ? 127 : 126 ); log_perror("execv failed"); - _exit(-1); + _exit(exitcode); } diff --git a/client/icecc-create-env.in b/client/icecc-create-env.in index da3186a17..ce9c6fe10 100755 --- a/client/icecc-create-env.in +++ b/client/icecc-create-env.in @@ -6,7 +6,10 @@ target_files= -case `uname` in +# Optional path to strip from all paths if present, e.g. if the compiler is not in /usr. +stripprefix= + +case $(uname) in "Darwin") is_darwin=1;; "FreeBSD") is_freebsd=1;; "Linux") is_linux=1;; @@ -14,9 +17,14 @@ esac usage () { - echo "usage: $0 --gcc " - echo "usage: $0 --clang " - echo "usage: Use --addfile to add extra files." + echo "Create compiler environment for distributed build." + echo "Usage: $0 " + echo "For GCC, pass the the gcc binary, the matching g++ will be used automatically." + echo "For Clang, pass the clang binary." + echo "Use --addfile to add extra files." + echo "For backwards compatibility, the following is also supported:" + echo "$0 --gcc " + echo "$0 --clang " } is_contained () @@ -28,6 +36,48 @@ is_contained () esac } +# returns abs path to filedir +abs_path() +{ + local path=$1 + if test -f "$path"; then + pushd $(dirname $path) > /dev/null 2>&1 + dir_path=$(pwd -P) + path=$dir_path/$(basename $path) + popd > /dev/null 2>&1 + elif test -d "$path"; then + pushd $path > /dev/null 2>&1 + path=$(pwd -P) + popd > /dev/null 2>&1 + fi + echo $path +} + +# return abs path to filedir with symlinks resolved +resolve_path() +{ + local_path=$1 + pushd / >/dev/null + # pwd -P in abs_path will take care of resolving symlinks in the path, + # so take care just of the file component itself + while test -L "$local_path"; do + cd $(dirname $local_path) + local_path=$(readlink $local_path) + done + abs_path $local_path + popd >/dev/null +} + +# Avoid /../ components in paths such as /usr/X11/../lib64 . +# This could use realpath, but that's reportedly not that widely available. +convert_path_cdup () +{ + local filename="$1" + local directory=$(dirname $filename) + local fixed_directory=$(cd "$directory" >/dev/null && pwd) + echo ${fixed_directory}/$(basename $filename) +} + add_file () { local name="$1" @@ -36,9 +86,11 @@ add_file () name="$2" fi test -z "$name" && return - # ls -H isn't really the same as readlink, but - # readlink is not portable enough. - path=`ls -H $path` + path=$(resolve_path $path) + name=$(convert_path_cdup $name) + if test -n "$stripprefix"; then + name=$(echo $name | sed "s#$stripprefix#/usr#" ) + fi toadd="$name=$path" if test "$name" = "$path"; then toadd=$path @@ -57,11 +109,11 @@ add_file () # libc.so.6 => /lib/tls/libc.so.6 (0xb7e81000) # /lib/ld-linux.so.2 (0xb7fe8000) # covering both situations ( with => and without ) - for lib in `ldd "$path" | sed -n 's,^[^/]*\(/[^ ]*\).*,\1,p'`; do + for lib in $(ldd "$path" | sed -n 's,^[^/]*\(/[^ ]*\).*,\1,p'); do test -f "$lib" || continue # Check wether the same library also exists in the parent directory, # and prefer that on the assumption that it is a more generic one. 
- local baselib=`echo "$lib" | sed 's,\(/[^/]*\)/.*\(/[^/]*\)$,\1\2,'` + local baselib=$(echo "$lib" | sed 's,\(/[^/]*\)/.*\(/[^/]*\)$,\1\2,') test -f "$baselib" && lib=$baselib add_file "$lib" done @@ -73,11 +125,11 @@ add_file () # /usr/lib/libiconv.2.dylib # /usr/lib/libSystem.B.dylib # /usr/lib/libstdc++.6.dylib - for lib in `otool -L "$path" | sed -n 's,^[^/@]*\([/@][^ ]*\).*,\1,p'`; do + for lib in $(otool -L "$path" | sed -n 's,^[^/@]*\([/@][^ ]*\).*,\1,p'); do local libinstall="" if test "${lib%%/*}" = "@executable_path"; then # Installs libs like @executable_path/libllvmgcc.dylib - # that contains @executable_path in its path in `dirname ${name}` + # that contains @executable_path in its path in $(dirname ${name}) # (the same install path of the executable program) libinstall="${name%/*}${lib#@executable_path}" lib="${path%/*}${lib#@executable_path}" @@ -85,7 +137,7 @@ add_file () test -f "$lib" || continue # Check wether the same library also exists in the parent directory, # and prefer that on the assumption that it is a more generic one. - local baselib=`echo "$lib" | sed 's,\(/[^/]*\)/.*\(/[^/]*\)$,\1\2,'` + local baselib=$(echo "$lib" | sed 's,\(/[^/]*\)/.*\(/[^/]*\)$,\1\2,') test -f "$baselib" && lib=$baselib add_file "$lib" "$libinstall" done @@ -93,23 +145,6 @@ add_file () fi } -# returns abs path to filedir -abs_path() -{ - local path=$1 - if test -f "$path"; then - pushd $(dirname $path) > /dev/null 2>&1 - dir_path=`pwd -P` - path=$dir_path/$(basename $path) - popd > /dev/null 2>&1 - elif test -d "$path"; then - pushd $path > /dev/null 2>&1 - path=`pwd -P` - popd > /dev/null 2>&1 - fi - echo $path -} - # Search and add file to the tarball file. search_addfile() { @@ -121,7 +156,11 @@ search_addfile() file=$($compiler -print-prog-name=$file_name) if test -z "$file" || test "$file" = "$file_name" || ! test -e "$file"; then - file=`$compiler -print-file-name=$file_name` + file=$($compiler -print-file-name=$file_name) + fi + + if test "$file" = "$file_name"; then + file=$(command -v $file_name || echo $file_name) fi if ! test -e "$file"; then @@ -130,24 +169,11 @@ search_addfile() if test -z "$file_installdir"; then # The file is going to be added to the tarball - # in the same path where the compiler found it. + # in the same path where the compiler found it, as an absolute path. + # If it's not in the /usr prefix, stripprefix handling will take care of that. file_installdir=$(dirname $file) - abs_installdir=$(abs_path $file_installdir) - - if test "$file_installdir" != "$abs_installdir"; then - # The path where the compiler found the file is relative! - # If the path where the compiler found the file is relative - # to compiler's path, we must change it to be relative to - # /usr/bin path where the compiler is going to be installed - # in the tarball file. - # Replacing relative path by abs path because the tar command - # used to create the tarball file doesn't work well with - # relative path as installdir. 
- - compiler_basedir=$(abs_path ${compiler%/*/*}) - file_installdir=${abs_installdir/$compiler_basedir/"/usr"} - fi + file_installdir=$(abs_path $file_installdir) fi add_file "$file" "$file_installdir/$file_name" @@ -160,37 +186,95 @@ if test "$1" = "--respect-path"; then shift fi -if test "$1" != "--gcc" -a "$1" != "--clang"; then - # backward compat +if test "$1" = "--gcc"; then + shift added_gcc=$1 shift added_gxx=$1 shift gcc=1 -else - if test "$1" = "--gcc"; then - shift - added_gcc=$1 - shift - added_gxx=$1 - shift - gcc=1 - elif test "$1" = "--clang"; then + if test "$1" = "--clang"; then shift added_clang=$1 shift - added_compilerwrapper=$1 - if test -n "$added_compilerwrapper"; then + if test "x$1" != "x--addfile" -a "x$1" != "x--gcc" -a -e "$1"; then # accept 2nd argument being the compilerwrapper binary, for backwards compatibility + added_compilerwrapper=$1 shift - else + fi + if test -z "$added_compilerwrapper"; then added_compilerwrapper=@PKGLIBEXECDIR@/compilerwrapper fi clang=1 - else + fi +elif test "$1" = "--clang"; then + shift + added_clang=$1 + shift + if test "x$1" != "x--addfile" -a "x$1" != "x--gcc" -a -e "$1"; then + # accept 2nd argument being the compilerwrapper binary, for backwards compatibility + added_compilerwrapper=$1 + shift + fi + if test -z "$added_compilerwrapper"; then + added_compilerwrapper=@PKGLIBEXECDIR@/compilerwrapper + fi + clang=1 + if test "$1" = "--gcc"; then + shift + added_gcc=$1 + shift + added_gxx=$1 + shift + gcc=1 + fi +else + if test -z "$1"; then usage exit 1 fi + # We got just a binary, find out what compiler it is and bypass any possible wrappers. + # __clang__ expands to 1 if compiler is Clang + # __GNUC__ expands to the main version number (and is valid also with Clang) + test_output=$(echo "clang __clang__ gcc __GNUC__" | "$1" -E -) + if test $? -ne 0; then + echo "$1" is not a compiler. + exit 1 + fi + if echo "$test_output" | grep -q '^clang 1 gcc.*'; then + clang=1 + # With clang, -print-prog-name gives the full path to the actual clang binary, + # allowing to bypass any possible wrapper script etc. Note we must pass + # just the binary name, not full path. + added_clang=$($1 -print-prog-name=$(basename $1)) + added_compilerwrapper=@PKGLIBEXECDIR@/compilerwrapper + elif echo "$test_output" | grep -q 'clang __clang__ gcc.*'; then + gcc=1 + # Gcc's -print-prog-name is useless, as it prints simply "gcc", so we have to + # get the location of the actual gcc binary from gcc -v output, which prints + # (to stderr) gcc's argv[0] as COLLECT_GCC. + added_gcc=$($1 -v 2>&1 | grep COLLECT_GCC= | sed 's/^COLLECT_GCC=//') + if test -z "$added_gcc"; then + echo Failed to find gcc location. + exit 1 + fi + if ! test -x "$added_gcc"; then + added_gcc=$(command -v $added_gcc) + fi + else + echo "$1" is not a known compiler. 
+ exit 1 + fi + shift + if test -n "$1" -a "x$1" != "x--addfile"; then + # (backwards) compatibility, assume the second argument is the C++ compiler + added_gxx=$1 + shift + fi + if test -n "$gcc" -a -z "$added_gxx"; then + # guess g++ from gcc + added_gxx=$(echo $added_gcc | sed 's/\(.*\)gcc/\1g++/') + fi fi if test -n "$gcc"; then @@ -238,7 +322,12 @@ while test "x$1" = "x--addfile"; do shift done -tempdir=`mktemp -d /tmp/iceccenvXXXXXX` +if test -n "$1"; then + echo "Unknown argument '$1'" + exit 1 +fi + +tempdir=$(mktemp -d /tmp/iceccenvXXXXXX) # for testing the environment is usable at all if test -x /bin/true; then @@ -248,9 +337,11 @@ elif test -x /usr/bin/true; then fi if test -n "$gcc"; then - # getting compilers abs path - added_gcc=$(abs_path $added_gcc) - added_gxx=$(abs_path $added_gxx) + # getting compilers resolved path + added_gcc=$(resolve_path $added_gcc) + added_gxx=$(resolve_path $added_gxx) + # In case gcc is installed elsewhere. + stripprefix=$(dirname $(dirname $added_gcc)) if test -z "$clang"; then add_file $added_gcc /usr/bin/gcc @@ -261,23 +352,19 @@ if test -n "$gcc"; then add_file $added_gcc /usr/bin/gcc.bin add_file $added_gxx /usr/bin/g++.bin fi - add_file `$added_gcc -print-prog-name=cc1` /usr/bin/cc1 - add_file `$added_gxx -print-prog-name=cc1plus` /usr/bin/cc1plus - - gcc_as=$($added_gcc -print-prog-name=as) - if test "$gcc_as" = "as"; then - add_file /usr/bin/as - else - add_file "$gcc_as" /usr/bin/as - fi - + search_addfile $added_gcc cc1 /usr/bin + search_addfile $added_gxx cc1plus /usr/bin + search_addfile $added_gcc as /usr/bin search_addfile $added_gcc specs search_addfile $added_gcc liblto_plugin.so - - objcopy=$($added_gcc -print-prog-name=objcopy) + search_addfile $added_gcc objcopy /usr/bin fi if test -n "$clang"; then + # getting compilers resolved path + added_clang=$(resolve_path $added_clang) + # In case clang is installed elsewhere. + stripprefix=$(dirname $(dirname $added_clang)) add_file $added_clang /usr/bin/clang # HACK: Older icecream remotes have /usr/bin/{gcc|g++} hardcoded and wouldn't # call /usr/bin/clang at all. So include a wrapper binary that will call gcc or clang @@ -285,40 +372,46 @@ if test -n "$clang"; then add_file $added_compilerwrapper /usr/bin/gcc add_file $added_compilerwrapper /usr/bin/g++ - add_file $($added_clang -print-prog-name=as) /usr/bin/as + search_addfile $added_clang as /usr/bin + search_addfile $added_clang objcopy /usr/bin + + # HACK: Clang4.0 and later access /proc/cpuinfo and report an error when they fail + # to find it, even if they use a fallback mechanism, making the error useless + # (at least in this case). Since the file is not really needed, create a fake one. + if test -d /proc; then + mkdir $tempdir/fakeproc + mkdir $tempdir/fakeproc/proc + touch $tempdir/fakeproc/proc/cpuinfo + add_file $tempdir/fakeproc/proc/cpuinfo /proc/cpuinfo + fi # clang always uses its internal .h files - clangincludes=$(dirname $($added_clang -print-file-name=include/limits.h)) - clangprefix=$(dirname $(dirname $added_clang)) + clangincludes=$($added_clang -print-file-name=include/limits.h) + if test -z "$clangincludes"; then + echo $added_clang cannot find its includes + exit 1 + fi + clangincludes=$(dirname $(abs_path $clangincludes)) for file in $(find $clangincludes -type f); do - # get path without .. - # readlink is not portable enough. 
- destfile=$(abs_path $file) - # and convert from to /usr if needed - destfile=$(echo $destfile | sed "s#$clangprefix#/usr#" ) - add_file "$file" "$destfile" + add_file "$file" done - - objcopy=$($added_clang -print-prog-name=objcopy) fi +# Do not do any prefix stripping on extra files, they (e.g. clang plugins) are usually +# referred to using their original path. +save_stripprefix="$stripprefix" +stripprefix= for extrafile in $extrafiles; do add_file $extrafile done - -if test "$objcopy" = "objcopy"; then - objcopy=/usr/bin/objcopy -fi -if [ -e "$objcopy" ]; then - add_file "$objcopy" /usr/bin/objcopy -fi +stripprefix="$save_stripprefix" if test "$is_darwin" = 1; then # add dynamic linker add_file /usr/lib/dyld add_file /usr/bin/gcc add_file /usr/bin/g++ - real_file=`/usr/bin/as -micha -- < /dev/null 2>&1 | sed -n 's,^[^/]*\(/[^ :]*\).*,\1,p'` + real_file=$(/usr/bin/as -micha -- < /dev/null 2>&1 | sed -n 's,^[^/]*\(/[^ :]*\).*,\1,p') add_file $(abs_path "$real_file") fi @@ -329,7 +422,7 @@ fi # for ldconfig -r to work, ld.so.conf must not contain relative paths # in include directives. Make them absolute. if test -f /etc/ld.so.conf; then - tmp_ld_so_conf=`mktemp /tmp/icecc_ld_so_confXXXXXX` + tmp_ld_so_conf=$(mktemp /tmp/icecc_ld_so_confXXXXXX) while read directive path; do if [ "$directive" = "include" -a "${path:0:1}" != "/" ]; then path="/etc/$path" @@ -348,20 +441,17 @@ new_target_files= for i in $target_files; do case $i in *=/*) - target=`echo $i | cut -d= -f1` - path=`echo $i | cut -d= -f2` + target=$(echo $i | cut -d= -f1) + path=$(echo $i | cut -d= -f2) ;; *) path=$i target=$i ;; esac - mkdir -p $tempdir/`dirname $target` + mkdir -p $tempdir/$(dirname $target) cp -p $path $tempdir/$target - if test -f $tempdir/$target -a -x $tempdir/$target; then - strip -s $tempdir/$target 2>/dev/null - fi - target=`echo $target | cut -b2-` + target=$(echo $target | cut -b2-) new_target_files="$new_target_files $target" done @@ -381,13 +471,13 @@ done # now sort the files in order to make the md5sums independent # of ordering -target_files=`for i in $new_target_files; do echo $i; done | sort` -md5=`for i in $target_files; do $md5sum $tempdir/$i; done | sed -e 's/ .*$//' | $md5sum | sed -e 's/ .*$//'` || { +target_files=$(for i in $new_target_files; do echo $i; done | sort) +md5=$(for i in $target_files; do $md5sum $tempdir/$i; done | sed -e "s# $tempdir##" | $md5sum | sed -e 's/ .*$//') || { echo "Couldn't compute MD5 sum." exit 2 } echo "creating $md5.tar.gz" -mydir=`pwd` +mydir=$(pwd) cd $tempdir tar -czh --numeric-owner -f "$mydir/$md5".tar.gz $target_files || { echo "Couldn't create archive" diff --git a/client/icecc-test-env.in b/client/icecc-test-env.in new file mode 100755 index 000000000..448fcd283 --- /dev/null +++ b/client/icecc-test-env.in @@ -0,0 +1,147 @@ +#! /bin/bash +# +# A simple script that can be used to see if an environment was built +# successfully. Note that passing these test doesn't guarantee the environment +# will work, but failing them means it certainly won't. 
Note that this script +# may need to be executed with sudo if the current user doesn't have chroot +# permissions +# +# This program always exits with an error code of 2 so that it can be +# distinguished from a sudo error (with an exit code of 1) +# +# Copyright (C) 2018 Joshua Watt +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +# Exit on any unexpected failure +set -e + +TEST_DIR= +QUIET=false +REALPATH=$(which realpath 2> /dev/null || true) + +# Cleanup the temp directory on exit +cleanup() { + if [ -n "$TEST_DIR" ]; then + rm -rf "$TEST_DIR" + fi +} +trap cleanup EXIT + +print_info() { + if ! $QUIET; then + echo "$@" + fi +} + +usage() { + echo "Usage: $(basename $0) [-h] TOOLCHAIN" + echo " -h --help Show Help" + echo " -q --quiet Only print errors" + echo " TOOLCHAIN Toolchain archive to test" + echo "" + echo "Tests a toolchain environment to see if it is correctly constructed" +} + +OPTIONS=`getopt -o hqf --long help,quiet -n $(basename $0) -- "$@"` +eval set -- "$OPTIONS" + +while true; do + case "$1" in + -h|--help) + usage + exit 0 + ;; + -q|--quiet) + QUIET=true + shift + ;; + --) + shift + break + ;; + *) + echo "Unknown option '$1'" + exit 2 + ;; + esac +done + +if [ -z "$1" ]; then + echo "Toolchain argument is required" + usage + exit 2 +fi +TEST_DIR=$(mktemp -d) + +if [ -z "$REALPATH" ]; then + echo "WARNING: realpath not found, symlink tests will be disabled" +fi + +# Extract the toolchain +tar -xf "$1" -C "$TEST_DIR" + +# Determine the compiler +if [ -e $TEST_DIR/usr/bin/clang ]; then + print_info "Compiler is clang" + IS_CLANG=true +else + print_info "Compiler is gcc" + IS_CLANG=false +fi + +check_program() { + local prog="$1" + shift + + cd $TEST_DIR + print_info "Checking $prog..." + if [ ! -x "${TEST_DIR}${prog}" ]; then + echo "$prog is missing or not executable" + exit 2 + fi + if [ -n "$REALPATH" ]; then + local target="$($REALPATH "${TEST_DIR}${prog}")" + case $target in + "$($REALPATH "${TEST_DIR}")"/*) + ;; + *) + echo "$prog is a symbolic link that points to '$target' outside the environment" + exit 2 + ;; + esac + fi + if ! chroot . 
$prog $@ < /dev/null; then + echo "$prog failed to execute" + exit 2 + fi + print_info "OK" +} + +check_program /bin/true +if $IS_CLANG; then + check_program /usr/bin/clang -xc -c -o test.o - + check_program /usr/bin/as + # NOTE: The compilerwrapper programs /usr/bin/gcc and /usr/bin/g++ are not + # tested because they interfer with the automated testing when the + # address sanitizer is enabled +else + ARGS="-fpreprocessed" + check_program /usr/bin/gcc $ARGS -xc -c -o test.o - + check_program /usr/bin/g++ $ARGS -xc++ -c -o test.o - + check_program /usr/bin/cc1 $ARGS -o test.o -quiet + check_program /usr/bin/cc1plus $ARGS -o test.o -quiet + check_program /usr/bin/as +fi diff --git a/client/local.cpp b/client/local.cpp index b803c2605..4fc6eceaf 100644 --- a/client/local.cpp +++ b/client/local.cpp @@ -97,7 +97,9 @@ static string compiler_path_lookup_helper(const string &compiler, const string & continue; } - best_match = part; + if( best_match.empty()) { + best_match = part; + } if (after_selflink) { return part; @@ -157,9 +159,27 @@ Therefore it is better to only locally merge all #include files into the source file and do the actual preprocessing remotely together with compiling. There exists a Clang patch to implement option -frewrite-includes that does such #include rewritting, and it's been only recently merged upstream. + +This is similar with newer gcc versions, and gcc has -fdirectives-only, which +works similarly to -frewrite-includes (although it's not exactly the same). */ bool compiler_only_rewrite_includes(const CompileJob &job) { + if( job.blockRewriteIncludes()) { + return false; + } + if (const char *rewrite_includes = getenv("ICECC_REMOTE_CPP")) { + return (*rewrite_includes != '\0') && (*rewrite_includes != '0'); + } + if (!compiler_is_clang(job)) { +#ifdef HAVE_GCC_FDIRECTIVES_ONLY + // gcc has had -fdirectives-only for a long time, but clang on macosx poses as gcc + // and fails when given the option. Since we right now detect whether a compiler + // is gcc merely by checking the binary name, enable usage only if the configure + // check found the option working. + return true; +#endif + } if (compiler_is_clang(job)) { if (const char *rewrite_includes = getenv("ICECC_CLANG_REMOTE_CPP")) { return (*rewrite_includes != '\0') && (*rewrite_includes != '0'); @@ -177,6 +197,11 @@ bool compiler_only_rewrite_includes(const CompileJob &job) return false; } +string clang_get_default_target(const CompileJob &job) +{ + return read_command_output( find_compiler( job ) + " -dumpmachine" ); +} + static volatile int lock_fd = 0; static volatile int user_break_signal = 0; static volatile pid_t child_pid; @@ -218,8 +243,6 @@ int build_local(CompileJob &job, MsgChannel *local_daemon, struct rusage *used) string compiler_name = find_compiler(job); - trace() << "invoking: " << compiler_name << endl; - if (compiler_name.empty()) { log_error() << "could not find " << job.compilerName() << " in PATH." 
<< endl; return EXIT_NO_SUCH_FILE; @@ -242,21 +265,17 @@ int build_local(CompileJob &job, MsgChannel *local_daemon, struct rusage *used) } vector argv; + string argstxt; for (list::const_iterator it = arguments.begin(); it != arguments.end(); ++it) { argv.push_back(strdup(it->c_str())); + argstxt += ' '; + argstxt += *it; } argv.push_back(0); -#if CLIENT_DEBUG - trace() << "execing "; - for (int i = 0; argv.at(i); i++) { - trace() << argv.at(i) << " "; - } - - trace() << endl; -#endif + trace() << "invoking:" << argstxt << endl; if (!local_daemon) { int fd; @@ -287,7 +306,7 @@ int build_local(CompileJob &job, MsgChannel *local_daemon, struct rusage *used) } if (!child_pid) { - dcc_increment_safeguard(); + dcc_increment_safeguard(job.language() == CompileJob::Lang_Custom ? SafeguardStepCustom : SafeguardStepCompiler); if (color_output) { if ((-1 == close(pf[0])) && (errno != EBADF)){ @@ -302,6 +321,7 @@ int build_local(CompileJob &job, MsgChannel *local_daemon, struct rusage *used) } execv(argv[0], &argv[0]); + int exitcode = ( errno == ENOENT ? 127 : 126 ); log_perror("execv failed"); if (lock_fd) { @@ -314,7 +334,7 @@ int build_local(CompileJob &job, MsgChannel *local_daemon, struct rusage *used) log_perror(buf); } - _exit(-1); + _exit(exitcode); } for(vector::const_iterator i = argv.begin(); i != argv.end(); ++i){ free(*i); diff --git a/client/main.cpp b/client/main.cpp index 7a03dea04..e29649c97 100644 --- a/client/main.cpp +++ b/client/main.cpp @@ -60,6 +60,7 @@ #include "client.h" #include "platform.h" #include "util.h" +#include "argv.h" using namespace std; @@ -70,7 +71,7 @@ static void dcc_show_usage(void) printf( "Usage:\n" " icecc [compiler] [compile options] -o OBJECT -c SOURCE\n" - " icecc --build-native [compilertype] [file...]\n" + " icecc --build-native [compiler] [file...]\n" " icecc --help\n" "\n" "Options:\n" @@ -82,7 +83,7 @@ static void dcc_show_usage(void) " If set to \"disable\", just exec the real compiler, but without\n" " notifying the daemon and only run one job at a time.\n" " ICECC_VERSION use a specific icecc environment, see icecc-create-env\n" - " ICECC_DEBUG [info | warnings | debug]\n" + " ICECC_DEBUG [info | warning | debug]\n" " sets verboseness of icecream client.\n" " ICECC_LOGFILE if set, additional debug information is logged to the specified file\n" " ICECC_REPEAT_RATE the number of jobs out of 1000 that should be\n" @@ -91,7 +92,7 @@ static void dcc_show_usage(void) " ICECC_PREFERRED_HOST overrides scheduler decisions if set.\n" " ICECC_CC set C compiler name (default gcc).\n" " ICECC_CXX set C++ compiler name (default g++).\n" - " ICECC_CLANG_REMOTE_CPP set to 1 or 0 to override remote preprocessing with clang\n" + " ICECC_REMOTE_CPP set to 1 or 0 to override remote preprocessing\n" " ICECC_IGNORE_UNVERIFIED if set, hosts where environment cannot be verified are not used.\n" " ICECC_EXTRAFILES additional files used in the compilation.\n" " ICECC_COLOR_DIAGNOSTICS set to 1 or 0 to override color diagnostics support.\n" @@ -111,7 +112,7 @@ static void icerun_show_usage(void) " --version show version and exit\n" "Environment Variables:\n" " ICECC if set to \"no\", just exec the real command\n" - " ICECC_DEBUG [info | warnings | debug]\n" + " ICECC_DEBUG [info | warning | debug]\n" " sets verboseness of icecream client.\n" " ICECC_LOGFILE if set, additional debug information is logged to the specified file\n" "\n"); @@ -141,44 +142,33 @@ static void dcc_client_catch_signals(void) signal(SIGHUP, &dcc_client_signalled); } -static string read_output(const 
char *command) -{ - FILE *f = popen(command, "r"); - string output; - - if (!f) { - log_error() << "no pipe " << strerror(errno) << endl; - return output; - } - - char buffer[1024]; - - while (!feof(f)) { - size_t bytes = fread(buffer, 1, sizeof(buffer) - 1, f); - buffer[bytes] = 0; - output += buffer; - } - - pclose(f); - // get rid of the endline - return output.substr(0, output.length() - 1); -} - /* - * @param args Are [clang,gcc] [extra files...] + * @param args Are [compiler] [extra files...] + * Compiler can be "gcc", "clang" or a binary (possibly including a path). */ static int create_native(char **args) { - bool is_clang = false; char **extrafiles = args; string machine_name = determine_platform(); - if (machine_name.compare(0, 6, "Darwin") == 0) - is_clang = true; - // Args[0] may be a compiler or the first extra file. - if (args[0] && ((!strcmp(args[0], "clang") && (is_clang = true)) - || (!strcmp(args[0], "gcc") && !(is_clang = false)))) { - extrafiles++; + string compiler = "gcc"; + if (machine_name.compare(0, 6, "Darwin") == 0) { + compiler = "clang"; + } + if (args[0]) { + if( strcmp(args[0], "clang") == 0 || strcmp(args[0], "gcc") == 0 ) { + compiler = args[ 0 ]; + ++extrafiles; + } else if( access( args[0], R_OK ) == 0 && access( args[ 0 ], X_OK ) != 0 ) { + // backwards compatibility, the first argument is already an extra file + } else { + compiler = compiler_path_lookup( args[ 0 ] ); + if (compiler.empty()) { + log_error() << "compiler not found" << endl; + return 1; + } + ++extrafiles; + } } vector argv; @@ -190,71 +180,121 @@ static int create_native(char **args) } argv.push_back(strdup(BINDIR "/icecc-create-env")); + argv.push_back(strdup(compiler.c_str())); - if (is_clang) { - string clang = compiler_path_lookup("clang"); + for (int extracount = 0; extrafiles[extracount]; extracount++) { + argv.push_back(strdup("--addfile")); + argv.push_back(strdup(extrafiles[extracount])); + } - if (clang.empty()) { - log_error() << "clang compiler not found" << endl; - return 1; - } + argv.push_back(NULL); + execv(argv[0], argv.data()); + log_perror("execv failed"); + return -1; +} - if (lstat(PLIBDIR "/compilerwrapper", &st)) { - log_error() << PLIBDIR "/compilerwrapper does not exist" << endl; - return 1; +static MsgChannel* get_local_daemon() +{ + MsgChannel* local_daemon; + if (getenv("ICECC_TEST_SOCKET") == NULL) { + /* try several options to reach the local daemon - 3 sockets, one TCP */ + local_daemon = Service::createChannel("/var/run/icecc/iceccd.socket"); + + if (!local_daemon) { + local_daemon = Service::createChannel("/var/run/iceccd.socket"); } - argv.push_back(strdup("--clang")); - argv.push_back(strdup(clang.c_str())); - argv.push_back(strdup(PLIBDIR "/compilerwrapper")); - } else { // "gcc" (default) - string gcc, gpp; - - // perhaps we're on gentoo - if (!lstat("/usr/bin/gcc-config", &st)) { - string gccpath = read_output("/usr/bin/gcc-config -B") + "/"; - gcc = gccpath + "gcc"; - gpp = gccpath + "g++"; - } else { - gcc = compiler_path_lookup("gcc"); - gpp = compiler_path_lookup("g++"); + if (!local_daemon && getenv("HOME")) { + string path = getenv("HOME"); + path += "/.iceccd.socket"; + local_daemon = Service::createChannel(path); } - // both C and C++ compiler are required - if (gcc.empty() || gpp.empty()) { - log_error() << "gcc compiler not found" << endl; - return 1; + if (!local_daemon) { + local_daemon = Service::createChannel("127.0.0.1", 10245, 0/*timeout*/); + } + } else { + local_daemon = Service::createChannel(getenv("ICECC_TEST_SOCKET")); + if 
(!local_daemon) { + log_error() << "test socket error" << endl; + exit( EXIT_TEST_SOCKET_ERROR ); } + } + return local_daemon; +} - argv.push_back(strdup("--gcc")); - argv.push_back(strdup(gcc.c_str())); - argv.push_back(strdup(gpp.c_str())); +static void debug_arguments(int argc, char** argv, bool original) +{ + string argstxt = argv[ 0 ]; + for( int i = 1; i < argc; ++i ) { + argstxt += ' '; + argstxt += argv[ i ]; } + if( original ) { + trace() << "invoked as: " << argstxt << endl; + } else { + trace() << "expanded as: " << argstxt << endl; + } +} - for (int extracount = 0; extrafiles[extracount]; extracount++) { - argv.push_back(strdup("--addfile")); - argv.push_back(strdup(extrafiles[extracount])); +class ArgumentExpander +{ +public: + ArgumentExpander(int *argcp, char ***argvp) + { + oldargv = *argvp; + oldargc = *argcp; + expandargv(argcp, argvp); + + newargv = *argvp; + if (newargv == oldargv) + newargv = NULL; } - argv.push_back(NULL); - execv(argv[0], argv.data()); - log_perror("execv failed"); - return -1; + ~ArgumentExpander() + { + if (newargv != NULL) + freeargv(newargv); + } -} + bool changed() const + { + return newargv != NULL; + } + + char** originalArgv() const + { + return oldargv; + } + + int originalArgc() const + { + return oldargc; + } + +private: + char ** newargv; + char ** oldargv; + int oldargc; +}; int main(int argc, char **argv) { - char *env = getenv("ICECC_DEBUG"); + // expand @responsefile contents to arguments in argv array + ArgumentExpander expand(&argc, &argv); + + const char *env = getenv("ICECC_DEBUG"); int debug_level = Error; if (env) { if (!strcasecmp(env, "info")) { - debug_level |= Info | Warning; - } else if (!strcasecmp(env, "warnings")) { - debug_level |= Warning; // taking out warning + debug_level = Info; + } else if (!strcasecmp(env, "warning") || !strcasecmp(env, "warnings")) { + // "warnings" was referred to in the --help output, handle it + // for backwards compatibility. + debug_level = Warning; } else { // any other value - debug_level |= Info | Debug | Warning; + debug_level = Debug; } } @@ -266,6 +306,11 @@ int main(int argc, char **argv) setup_debug(debug_level, logfile, "ICECC"); + debug_arguments(expand.originalArgc(), expand.originalArgv(), true); + if( expand.changed()) { + debug_arguments(argc, argv, false); + } + CompileJob job; bool icerun = false; @@ -331,15 +376,28 @@ int main(int argc, char **argv) int sg_level = dcc_recursion_safeguard(); - if (sg_level > 0) { + if (sg_level >= SafeguardMaxLevel) { log_error() << "icecream seems to have invoked itself recursively!" << endl; return EXIT_RECURSION; } + if (sg_level > 0) { + log_info() << "recursive invocation from icerun" << endl; + } /* Ignore SIGPIPE; we consistently check error codes and will * see the EPIPE. */ dcc_ignore_sigpipe(1); + // Connect to the daemon as early as possible, so that in parallel builds + // the daemon has as many connections as possible when we start asking for a remote + // node to build, allowing the daemon/scheduler to do load balancing based on the number + // of expected build jobs. + MsgChannel *local_daemon = NULL; + const char *icecc = getenv("ICECC"); + if (icecc == NULL || strcasecmp(icecc, "disable") != 0) { + local_daemon = get_local_daemon(); + } + list extrafiles; local |= analyse_argv(argv, job, icerun, &extrafiles); @@ -349,9 +407,8 @@ int main(int argc, char **argv) If ICECC is set to no, the job is run locally as well, but it is serialized using the daemon, so several may be run at once. 
*/ - char *icecc = getenv("ICECC"); - if (icecc && !strcasecmp(icecc, "disable")) { + assert( local_daemon == NULL ); return build_local(job, 0); } @@ -359,6 +416,11 @@ int main(int argc, char **argv) local = true; } + if (!local_daemon) { + log_warning() << "no local daemon found" << endl; + return build_local(job, 0); + } + if (const char *extrafilesenv = getenv("ICECC_EXTRAFILES")) { for (;;) { const char *colon = strchr(extrafilesenv, ':'); @@ -377,7 +439,8 @@ int main(int argc, char **argv) extrafiles.push_back(file); } else { log_warning() << "File in ICECC_EXTRAFILES not found: " << file << endl; - return build_local(job, 0); + local = true; + break; } if (colon == NULL) { @@ -388,65 +451,38 @@ int main(int argc, char **argv) } } - MsgChannel *local_daemon; - if (getenv("ICECC_TEST_SOCKET") == NULL) { - /* try several options to reach the local daemon - 3 sockets, one TCP */ - local_daemon = Service::createChannel("/var/run/icecc/iceccd.socket"); - - if (!local_daemon) { - local_daemon = Service::createChannel("/var/run/iceccd.socket"); - } - - if (!local_daemon && getenv("HOME")) { - string path = getenv("HOME"); - path += "/.iceccd.socket"; - local_daemon = Service::createChannel(path); - } - - if (!local_daemon) { - local_daemon = Service::createChannel("127.0.0.1", 10245, 0/*timeout*/); - } - } else { - local_daemon = Service::createChannel(getenv("ICECC_TEST_SOCKET")); - if (!local_daemon) { - log_error() << "test socket error" << endl; - return EXIT_TEST_SOCKET_ERROR; - } - } - - if (!local_daemon) { - log_warning() << "no local daemon found" << endl; - return build_local(job, 0); - } - Environments envs; if (!local) { if (getenv("ICECC_VERSION")) { // if set, use it, otherwise take default try { envs = parse_icecc_version(job.targetPlatform(), find_prefix(job.compilerName())); - } catch (std::exception) { + } catch (std::exception& e) { // we just build locally + log_error() << "An exception was handled parsing the icecc version. " + "Will build locally. Exception text was:\n" << e.what() << "\n"; } } else if (!extrafiles.empty() && !IS_PROTOCOL_32(local_daemon)) { - log_warning() << "Local daemon is too old to handle compiler plugins." << endl; + log_warning() << "Local daemon is too old to handle extra files." << endl; local = true; } else { + Msg *umsg = NULL; if (!local_daemon->send_msg(GetNativeEnvMsg(compiler_is_clang(job) ? "clang" : "gcc", extrafiles))) { log_warning() << "failed to write get native environment" << endl; - goto do_local_error; + local = true; + } else { + // the timeout is high because it creates the native version + umsg = local_daemon->get_msg(4 * 60); } - // the timeout is high because it creates the native version - Msg *umsg = local_daemon->get_msg(4 * 60); string native; if (umsg && umsg->type == M_NATIVE_ENV) { native = static_cast(umsg)->nativeVersion; } - if (native.empty() || ::access(native.c_str(), R_OK)) { + if (native.empty() || ::access(native.c_str(), R_OK) < 0) { log_warning() << "daemon can't determine native environment. 
" "Set $ICECC_VERSION to an icecc environment.\n"; } else { @@ -465,7 +501,7 @@ int main(int argc, char **argv) for (Environments::const_iterator it = envs.begin(); it != envs.end(); ++it) { trace() << "env: " << it->first << " '" << it->second << "'" << endl; - if (::access(it->second.c_str(), R_OK)) { + if (::access(it->second.c_str(), R_OK) < 0) { log_error() << "can't read environment " << it->second << endl; local = true; } @@ -474,32 +510,13 @@ int main(int argc, char **argv) int ret; - if (local) { - log_block b("building_local"); - struct rusage ru; - Msg *startme = 0L; - - /* Inform the daemon that we like to start a job. */ - if (local_daemon->send_msg(JobLocalBeginMsg(0, get_absfilename(job.outputFile())))) { - /* Now wait until the daemon gives us the start signal. 40 minutes - should be enough for all normal compile or link jobs. */ - startme = local_daemon->get_msg(40 * 60); - } - - /* If we can't talk to the daemon anymore we need to fall back - to lock file locking. */ - if (!startme || startme->type != M_JOB_LOCAL_BEGIN) { - delete startme; - goto do_local_error; - } - - ret = build_local(job, local_daemon, &ru); - delete startme; - } else { + if (!local) { try { - // check if it should be compiled three times + // How many times out of 1000 should we recompile a job on + // multiple hosts to confirm that the results are the same? const char *s = getenv("ICECC_REPEAT_RATE"); int rate = s ? atoi(s) : 0; + ret = build_remote(job, local_daemon, envs, rate); /* We have to tell the local daemon that everything is fine and @@ -512,7 +529,7 @@ int main(int argc, char **argv) } } catch (remote_error& error) { log_info() << "local build forced by remote exception: " << error.what() << endl; - goto do_local_error; + local = true; } catch (client_error& error) { if (remote_daemon.size()) { @@ -523,19 +540,52 @@ int main(int argc, char **argv) endl; } +#if 0 /* currently debugging a client? throw an error then */ - if (debug_level != Error) { + if (debug_level > Error) { return error.errorCode; } +#endif - goto do_local_error; + local = true; + } + if (local) { + // TODO It'd be better to reuse the connection, but the daemon + // internal state gets confused for some reason, so work that around + // for now by using a new connection. + delete local_daemon; + local_daemon = get_local_daemon(); + if (!local_daemon) { + log_warning() << "no local daemon found" << endl; + return build_local(job, 0); + } } } - delete local_daemon; - return ret; + if (local) { + log_block b("building_local"); + struct rusage ru; + Msg *startme = 0L; + + /* Inform the daemon that we like to start a job. */ + if (local_daemon->send_msg(JobLocalBeginMsg(0, get_absfilename(job.outputFile())))) { + /* Now wait until the daemon gives us the start signal. 40 minutes + should be enough for all normal compile or link jobs. */ + startme = local_daemon->get_msg(40 * 60); + } + + /* If we can't talk to the daemon anymore we need to fall back + to lock file locking. 
*/ + if (!startme || startme->type != M_JOB_LOCAL_BEGIN) { + delete startme; + delete local_daemon; + return build_local(job, 0); + } + + ret = build_local(job, local_daemon, &ru); + delete startme; + } -do_local_error: delete local_daemon; - return build_local(job, 0); + return ret; } diff --git a/client/md5.h b/client/md5.h index 6fced3d83..6796eb0a3 100644 --- a/client/md5.h +++ b/client/md5.h @@ -44,14 +44,6 @@ #ifndef md5_INCLUDED # define md5_INCLUDED -/* - * This code has some adaptations for the Ghostscript environment, but it - * will compile and run correctly in any environment with 8-bit chars and - * 32-bit ints. Specifically, it assumes that if the following are - * defined, they have the same meaning as in Ghostscript: P1, P2, P3, - * ARCH_IS_BIG_ENDIAN. - */ - typedef unsigned char md5_byte_t; /* 8-bit byte */ typedef unsigned int md5_word_t; /* 32-bit word */ @@ -67,26 +59,14 @@ extern "C" { #endif - /* Initialize the algorithm. */ -#ifdef P1 - void md5_init(P1(md5_state_t *pms)); -#else - void md5_init(md5_state_t *pms); -#endif +/* Initialize the algorithm. */ +void md5_init(md5_state_t *pms); - /* Append a string to the message. */ -#ifdef P3 - void md5_append(P3(md5_state_t *pms, const md5_byte_t *data, int nbytes)); -#else - void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes); -#endif +/* Append a string to the message. */ +void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes); - /* Finish the message and return the digest. */ -#ifdef P2 - void md5_finish(P2(md5_state_t *pms, md5_byte_t digest[16])); -#else - void md5_finish(md5_state_t *pms, md5_byte_t digest[16]); -#endif +/* Finish the message and return the digest. */ +void md5_finish(md5_state_t *pms, md5_byte_t digest[16]); #ifdef __cplusplus } /* end extern "C" */ diff --git a/client/remote.cpp b/client/remote.cpp index 1b6cf535a..0f9e7740b 100644 --- a/client/remote.cpp +++ b/client/remote.cpp @@ -125,7 +125,7 @@ parse_icecc_version(const string &target_platform, const string &prefix) continue; } - if (::access(version.c_str(), R_OK)) { + if (::access(version.c_str(), R_OK) < 0) { log_error() << "$ICECC_VERSION has to point to an existing file to be installed " << version << endl; continue; } @@ -481,6 +481,13 @@ static int build_remote_int(CompileJob &job, UseCSMsg *usecs, MsgChannel *local_ throw client_error(26, "Error 26 - environment on " + hostname + " cannot be verified"); } + // Older remotes don't properly set the -x argument. + if(( job.language() == CompileJob::Lang_OBJC || job.language() == CompileJob::Lang_OBJCXX ) + && !IS_PROTOCOL_38(cserver)) { + job.appendFlag( "-x", Arg_Remote ); + job.appendFlag( job.language() == CompileJob::Lang_OBJC ? 
"objective-c" : "objective-c++", Arg_Remote ); + } + CompileFileMsg compile_file(&job); { log_block b("send compile_file"); @@ -523,6 +530,7 @@ static int build_remote_int(CompileJob &job, UseCSMsg *usecs, MsgChannel *local_ if (shell_exit_status(status) != 0) { // failure delete cserver; cserver = 0; + log_warning() << "call_cpp process failed with exit status " << shell_exit_status(status) << endl; return shell_exit_status(status); } } else { @@ -574,6 +582,7 @@ static int build_remote_int(CompileJob &job, UseCSMsg *usecs, MsgChannel *local_ if ((!crmsg->out.empty() || !crmsg->err.empty()) && output_needs_workaround(job)) { delete crmsg; log_info() << "command needs stdout/stderr workaround, recompiling locally" << endl; + log_info() << "(set ICECC_CARET_WORKAROUND=0 to override)" << endl; throw remote_error(102, "Error 102 - command needs stdout/stderr workaround, recompiling locally"); } @@ -606,7 +615,7 @@ static int build_remote_int(CompileJob &job, UseCSMsg *usecs, MsgChannel *local_ } catch (...) { // Handle pending status messages, if any. if(cserver) { - while(Msg* msg = cserver->get_msg(0)) { + while(Msg* msg = cserver->get_msg(0, true)) { if(msg->type == M_STATUS_TEXT) log_error() << "Remote status (compiled on " << cserver->name << "): " << static_cast(msg)->text << endl; @@ -754,8 +763,13 @@ int build_remote(CompileJob &job, MsgChannel *local_daemon, const Environments & } } - trace() << job.inputFile() << " compiled " << torepeat << " times on " - << job.targetPlatform() << "\n"; + if( torepeat == 1 ) { + trace() << "preparing " << job.inputFile() << " to be compiled for " + << job.targetPlatform() << "\n"; + } else { + trace() << "preparing " << job.inputFile() << " to be compiled " << torepeat << " times for " + << job.targetPlatform() << "\n"; + } map versionfile_map, version_map; Environments envs = rip_out_paths(_envs, version_map, versionfile_map); @@ -796,11 +810,16 @@ int build_remote(CompileJob &job, MsgChannel *local_daemon, const Environments & UseCSMsg *usecs = get_server(local_daemon); int ret; - if (!maybe_build_local(local_daemon, usecs, job, ret)) - ret = build_remote_int(job, usecs, local_daemon, - version_map[usecs->host_platform], - versionfile_map[usecs->host_platform], - 0, true); + try { + if (!maybe_build_local(local_daemon, usecs, job, ret)) + ret = build_remote_int(job, usecs, local_daemon, + version_map[usecs->host_platform], + versionfile_map[usecs->host_platform], + 0, true); + } catch(...) { + delete usecs; + throw; + } delete usecs; return ret; @@ -822,6 +841,7 @@ int build_remote(CompileJob &job, MsgChannel *local_daemon, const Environments & waitpid(cpp_pid, &status, 0); if (shell_exit_status(status)) { // failure + log_warning() << "call_cpp process failed with exit status " << shell_exit_status(status) << endl; ::unlink(preproc); return shell_exit_status(status); } diff --git a/client/safeguard.cpp b/client/safeguard.cpp index 4ec7fde25..6d463fb74 100644 --- a/client/safeguard.cpp +++ b/client/safeguard.cpp @@ -20,6 +20,8 @@ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ +#include "client.h" + #include "logging.h" using namespace std; @@ -37,12 +39,11 @@ using namespace std; **/ static const char dcc_safeguard_name[] = "_ICECC_SAFEGUARD"; -static char dcc_safeguard_set[] = "_ICECC_SAFEGUARD=1"; static int dcc_safeguard_level; int dcc_recursion_safeguard(void) { - char *env = getenv(dcc_safeguard_name); + const char *env = getenv(dcc_safeguard_name); if (env) { //trace() << "safeguard: " << env << endl; @@ -59,14 +60,11 @@ int dcc_recursion_safeguard(void) } -void dcc_increment_safeguard(void) +void dcc_increment_safeguard(SafeguardStep step) { - if (dcc_safeguard_level > 0) { - dcc_safeguard_set[sizeof dcc_safeguard_set - 2] = dcc_safeguard_level + '1'; - } - + char value[2] = { (char)(dcc_safeguard_level + step + '0'), '\0' }; //trace() << "setting safeguard: " << dcc_safeguard_set << endl; - if ((putenv(strdup(dcc_safeguard_set)) == -1)) { + if (setenv(dcc_safeguard_name, value, 1) == -1) { log_error() << "putenv failed" << endl; } } diff --git a/client/util.cpp b/client/util.cpp index 609b365c8..7c8d519ee 100644 --- a/client/util.cpp +++ b/client/util.cpp @@ -368,3 +368,26 @@ std::string get_cwd() return string(&buffer[0]); } + +std::string read_command_output(const std::string& command) +{ + FILE *f = popen(command.c_str(), "r"); + string output; + + if (!f) { + log_error() << "no pipe " << strerror(errno) << endl; + return output; + } + + char buffer[1024]; + + while (!feof(f)) { + size_t bytes = fread(buffer, 1, sizeof(buffer) - 1, f); + buffer[bytes] = 0; + output += buffer; + } + + pclose(f); + // get rid of the endline + return output.substr(0, output.length() - 1); +} diff --git a/client/util.h b/client/util.h index a40dc2e9c..ad7430e9d 100644 --- a/client/util.h +++ b/client/util.h @@ -37,6 +37,7 @@ extern bool output_needs_workaround(const CompileJob &job); extern bool ignore_unverified(); extern int resolve_link(const std::string &file, std::string &resolved); extern std::string get_cwd(); +extern std::string read_command_output(const std::string& command); extern bool dcc_unlock(int lock_fd); extern bool dcc_lock_host(int &lock_fd); diff --git a/compilerwrapper/compilerwrapper.cpp b/compilerwrapper/compilerwrapper.cpp index ae9612cba..a5a1ef1d9 100644 --- a/compilerwrapper/compilerwrapper.cpp +++ b/compilerwrapper/compilerwrapper.cpp @@ -36,6 +36,7 @@ Which one depends on an extra argument added by icecream. 
#include #include +#include #include #include @@ -143,5 +144,5 @@ int main(int argc, char *argv[]) #endif execv(args[0], args); fprintf(stderr, "execv failed\n"); - return 1; + exit(1); } diff --git a/configure.ac b/configure.ac index eb4e7c3e4..b377cc551 100644 --- a/configure.ac +++ b/configure.ac @@ -5,12 +5,15 @@ AC_PREREQ([2.63]) # ==================== # Version informations # ==================== +# Stable versions: x.y.z , where z < 50 +# Development versions: x.y.90 +# Pre-release versions: x.y.z, where z = 90 + X in rcX (1.1rc1 = 1.1.91) m4_define([icecream_version_major],[1]) m4_define([icecream_version_minor],[1]) -#m4_define([icecream_version_micro],[98]) +m4_define([icecream_version_micro],[90]) m4_ifval([icecream_version_micro], - m4_define([icecream_version],[icecream_version_major.icecream_version_minor.icecream_version_micro]), - m4_define([icecream_version],[icecream_version_major.icecream_version_minor])) + [m4_define([icecream_version],[icecream_version_major.icecream_version_minor.icecream_version_micro])], + [m4_define([icecream_version],[icecream_version_major.icecream_version_minor])]) # ============= # Automake init @@ -195,6 +198,8 @@ case $host_os in esac AC_SUBST(LIB_KINFO) +AC_CHECK_PROG(CLANG,clang,clang) + AC_ARG_ENABLE(clang-rewrite-includes, AS_HELP_STRING([--enable-clang-rewrite-includes], [Use by default Clang's -frewrite-includes option.])) @@ -204,7 +209,6 @@ if test "$enable_clang_rewrite_includes" = "yes"; then elif test "$enable_clang_rewrite_includes" = "no"; then true # do not enable else - AC_CHECK_PROG(CLANG,clang,clang) if test -n "$CLANG"; then AC_MSG_CHECKING([whether clang -Werror works for unknown options]) $CLANG -Werror -totallybogusoption -E - >/dev/null 2>/dev/null @@ -225,17 +229,6 @@ else fi fi -HAVE_CLANG_DEVEL_DEP= -AC_CHECK_PROG(CLANG,clang,clang) -AC_LANG_PUSH([C++]) -save_CPPFLAGS=$CPPFLAGS -CPPFLAGS="$CPPFLAGS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS" -AC_CHECK_HEADER(clang/AST/RecursiveASTVisitor.h, - [ HAVE_CLANG_DEVEL_DEP=clangplugin ] ) -CPPFLAGS="$save_CPPFLAGS" -AC_LANG_POP([C++]) -AC_SUBST(HAVE_CLANG_DEVEL_DEP) - AC_ARG_ENABLE(clang-wrappers, AS_HELP_STRING([--enable-clang-wrappers], [Use symlink wrappers for clang/clang++.])) @@ -246,7 +239,6 @@ if test "$enable_clang_wrappers" = "yes"; then elif test "$enable_clang_wrappers" = "no"; then true # do not enable else - AC_CHECK_PROG(CLANG,clang,clang) if test -n "$CLANG"; then CLANG_SYMLINK_WRAPPERS='clang clang++' fi @@ -285,6 +277,42 @@ else fi fi +AC_ARG_ENABLE(gcc-fdirectives-only, + AS_HELP_STRING([--enable-gcc-fdirectives-only], + [Use by default GCC's -fdirectives-only option.])) + +if test "$enable_gcc_fdirectives_only" = "yes"; then + AC_DEFINE(HAVE_GCC_FDIRECTIVES_ONLY, 1, [Define to 1 if gcc supports -fdirectives-only]) +elif test "$enable_gcc_fdirectives_only" = "no"; then + true # do not enable +else + AC_CHECK_PROG(GCC_BIN,gcc,gcc) + AC_MSG_CHECKING([whether $GCC_BIN -Werror works for unknown options]) + $GCC_BIN -Werror -totallybogusoption -E - >/dev/null 2>/dev/null + if test $? -eq 0; then + AC_MSG_RESULT(no) + # can't detect if the option is supported, but that's too old gcc anyway + else + AC_MSG_RESULT(yes) + AC_MSG_CHECKING([for $GCC_BIN -E -fdirectives-only]) + $GCC_BIN -Werror -E -fdirectives-only - >/dev/null 2>/dev/null + if test $? 
-eq 0; then + AC_MSG_RESULT(yes) + AC_DEFINE(HAVE_GCC_FDIRECTIVES_ONLY, 1, [Define to 1 if gcc supports -fdirectives-only]) + else + AC_MSG_RESULT(no) + fi + fi +fi + +AC_MSG_CHECKING([for -fsanitize= usage]) +if echo "$CXXFLAGS" | grep -q -- -fsanitize; then + AC_DEFINE(SANITIZER_USED, 1, [Define to 1 if compiled with -fsanitize option(s)]) + AC_MSG_RESULT(yes) +else + AC_MSG_RESULT(no) +fi + AC_CONFIG_FILES([ Makefile ]) AC_CONFIG_FILES([ client/Makefile ]) AC_CONFIG_FILES([ daemon/Makefile ]) @@ -296,6 +324,8 @@ AC_CONFIG_FILES([ compilerwrapper/Makefile ]) AC_CONFIG_FILES([ scheduler/Makefile ]) AC_CONFIG_FILES([ tests/Makefile ]) AC_CONFIG_FILES([ client/icecc-create-env ]) +AC_CONFIG_FILES([ client/icecc-test-env ]) +AC_CONFIG_FILES([ tests/test-setup.sh ]) AC_OUTPUT([ suse/icecream.spec ]) if test "$prefix" = NONE; then prefix=$ac_default_prefix diff --git a/daemon/environment.cpp b/daemon/environment.cpp index 926b1cac1..4f163f4a5 100644 --- a/daemon/environment.cpp +++ b/daemon/environment.cpp @@ -40,50 +40,6 @@ using namespace std; -#if 0 -static string read_fromFILE(FILE *f) -{ - string output; - - if (!f) { - log_error() << "no pipe " << strerror(errno) << endl; - return output; - } - - char buffer[100]; - - while (!feof(f)) { - size_t bytes = fread(buffer, 1, 99, f); - buffer[bytes] = 0; - output += buffer; - } - - pclose(f); - return output; -} - -static bool extract_version(string &version) -{ - string::size_type pos = version.find_last_of('\n'); - - if (pos == string::npos) { - return false; - } - - while (pos + 1 == version.size()) { - version.resize(version.size() - 1); - pos = version.find_last_of('\n'); - - if (pos == string::npos) { - return false; - } - } - - version = version.substr(pos + 1); - return true; -} -#endif - size_t sumup_dir(const string &dir) { size_t res = 0; @@ -131,7 +87,7 @@ static void list_target_dirs(const string ¤t_target, const string &targetd for (struct dirent *ent = readdir(envdir); ent; ent = readdir(envdir)) { string dirname = ent->d_name; - if (!access(string(targetdir + "/" + dirname + "/usr/bin/as").c_str(), X_OK)) { + if (access(string(targetdir + "/" + dirname + "/usr/bin/as").c_str(), X_OK) == 0) { envs.push_back(make_pair(current_target, dirname)); } } @@ -414,6 +370,7 @@ int start_create_env(const string &basedir, uid_t user_uid, gid_t user_gid, int pos = 0; argv[pos++] = BINDIR "/icecc"; argv[pos++] = "--build-native"; + const int first_to_free = pos; argv[pos++] = strdup(compiler.c_str()); for (list::const_iterator it = extrafiles.begin(); it != extrafiles.end(); ++it) { @@ -426,6 +383,9 @@ int start_create_env(const string &basedir, uid_t user_uid, gid_t user_gid, log_error() << BINDIR "/icecc --build-native failed" << endl; _exit(1); } + for( int i = first_to_free; i < pos; ++i ) + free( (void*) argv[ i ] ); + delete[] argv; _exit(0); } @@ -781,8 +741,8 @@ bool verify_env(MsgChannel *client, const string &basedir, const string &target, string dirname = basedir + "/target=" + target + "/" + env; - if (::access(string(dirname + "/bin/true").c_str(), X_OK)) { - error_client(client, dirname + "/bin/true is not executable"); + if (::access(string(dirname + "/bin/true").c_str(), X_OK) < 0) { + error_client(client, dirname + "/bin/true is not executable, installed environment removed?"); log_error() << "I don't have environment " << env << "(" << target << ") to verify." 
<< endl; return false; } @@ -803,7 +763,7 @@ bool verify_env(MsgChannel *client, const string &basedir, const string &target, } // child - reset_debug(0); + reset_debug(); chdir_to_environment(client, dirname, user_uid, user_gid); execl("bin/true", "bin/true", (void*)NULL); log_perror("execl failed"); diff --git a/daemon/load.cpp b/daemon/load.cpp index 1d2574dc1..5972aca97 100644 --- a/daemon/load.cpp +++ b/daemon/load.cpp @@ -78,6 +78,11 @@ struct CPULoadInfo { load_t waitTicks; CPULoadInfo() { + userLoad = 0; + niceLoad = 0; + sysLoad = 0; + idleLoad = 0; + userTicks = 0; niceTicks = 0; sysTicks = 0; @@ -199,11 +204,7 @@ static void updateCPULoad(CPULoadInfo *load) load->userLoad = (1000 * (currUserTicks - load->userTicks)) / totalTicks; load->sysLoad = (1000 * (currSysTicks - load->sysTicks)) / totalTicks; load->niceLoad = (1000 * (currNiceTicks - load->niceTicks)) / totalTicks; - load->idleLoad = (1000 - (load->userLoad + load->sysLoad + load->niceLoad)); - - if (load->idleLoad < 0) { - load->idleLoad = 0; - } + load->idleLoad = (1000 * (currIdleTicks - load->idleTicks)) / totalTicks; } else { load->userLoad = load->sysLoad = load->niceLoad = 0; load->idleLoad = 1000; @@ -216,7 +217,7 @@ static void updateCPULoad(CPULoadInfo *load) load->waitTicks = currWaitTicks; } -#ifndef USE_SYSCTL +#if !defined(USE_SYSCTL) && !defined(USE_MACH) static unsigned long int scan_one(const char *buff, const char *key) { const char *b = strstr(buff, key); @@ -237,7 +238,7 @@ static unsigned long int scan_one(const char *buff, const char *key) static unsigned int calculateMemLoad(unsigned long int &NetMemFree) { - unsigned long long MemFree = 0, Buffers = 0, Cached = 0; + unsigned long long MemTotal = 0, MemFree = 0, Buffers = 0, Cached = 0; #ifdef USE_MACH /* Get VM statistics. 
*/ @@ -262,11 +263,15 @@ static unsigned int calculateMemLoad(unsigned long int &NetMemFree) #elif defined( USE_SYSCTL ) size_t len = sizeof(MemFree); + + if ((sysctlbyname("hw.physmem", &MemTotal, &len, NULL, 0) == -1) || !len) { + MemTotal = 0; /* Doesn't work under FreeBSD v2.2.x */ + } + if ((sysctlbyname("vm.stats.vm.v_free_count", &MemFree, &len, NULL, 0) == -1) || !len) { MemFree = 0; /* Doesn't work under FreeBSD v2.2.x */ } - len = sizeof(Buffers); if ((sysctlbyname("vfs.bufspace", &Buffers, &len, NULL, 0) == -1) || !len) { @@ -308,30 +313,31 @@ static unsigned int calculateMemLoad(unsigned long int &NetMemFree) } buf[n] = '\0'; + MemTotal = scan_one(buf, "MemTotal"); MemFree = scan_one(buf, "MemFree"); Buffers = scan_one(buf, "Buffers"); Cached = scan_one(buf, "Cached"); #endif - if (Buffers > 50 * 1024) { - Buffers -= 50 * 1024; + /* Can't calculate a memory load if we don't know how much memory we have */ + if (!MemTotal) + return 0; + + if (Buffers > MemTotal / 100) { + Buffers -= MemTotal / 100; } else { Buffers /= 2; } - if (Cached > 50 * 1024) { - Cached -= 50 * 1024; + if (Cached > MemTotal / 100) { + Cached -= MemTotal / 100; } else { Cached /= 2; } NetMemFree = MemFree + Cached + Buffers; - if (NetMemFree > 128 * 1024) { - return 0; - } - - return 1000 - (NetMemFree * 1000 / (128 * 1024)); + return 1000 - (NetMemFree * 1000 / MemTotal); } // Load average calculation based on CALC_LOAD(), in the 2.6 Linux kernel @@ -383,7 +389,7 @@ int fakeloadavg(double *p_result, int resultEntries, unsigned int currentJobs) return numFilled; } -bool fill_stats(unsigned long &myidleload, unsigned long &myniceload, unsigned int &memory_fillgrade, StatsMsg *msg, unsigned int hint) +void fill_stats(unsigned long &myidleload, unsigned long &myniceload, unsigned int &memory_fillgrade, StatsMsg *msg, unsigned int hint) { static CPULoadInfo load; @@ -411,6 +417,4 @@ bool fill_stats(unsigned long &myidleload, unsigned long &myniceload, unsigned i msg->freeMem = (load_t)(MemFree / 1024.0 + 0.5); } - - return true; } diff --git a/daemon/load.h b/daemon/load.h index 0105a6cc1..af84d5d6d 100644 --- a/daemon/load.h +++ b/daemon/load.h @@ -26,6 +26,6 @@ #include // 'hint' is used to approximate the load, whenever getloadavg() is unavailable. -bool fill_stats(unsigned long &myidleload, unsigned long &myniceload, unsigned int &memory_fillgrade, StatsMsg *msg, unsigned int hint); +void fill_stats(unsigned long &myidleload, unsigned long &myniceload, unsigned int &memory_fillgrade, StatsMsg *msg, unsigned int hint); #endif diff --git a/daemon/main.cpp b/daemon/main.cpp index ad69161f8..aaf5291f0 100644 --- a/daemon/main.cpp +++ b/daemon/main.cpp @@ -411,7 +411,14 @@ void usage(const char *reason = 0) } struct timeval last_stat; + +// Initial rlimit for a compile job, measured in megabytes. Will vary with +// the amount of available memory. int mem_limit = 100; + +// Minimum rlimit for a compile job, measured in megabytes. 
+const int min_mem_limit = 100; + unsigned int max_kids = 0; size_t cache_size_limit = 100 * 1024 * 1024; @@ -425,6 +432,12 @@ struct NativeEnvironment { time_t gpp_bin_timestamp; time_t clang_bin_timestamp; int create_env_pipe; // if in progress of creating the environment + NativeEnvironment() { + gcc_bin_timestamp = 0; + gpp_bin_timestamp = 0; + clang_bin_timestamp = 0; + create_env_pipe = 0; + } }; struct Daemon { @@ -483,7 +496,7 @@ struct Daemon { user_gid = getgid(); } - envbasedir = "/tmp/icecc-envs"; + envbasedir = "/var/tmp/icecc-envs"; tcp_listen_fd = -1; unix_listen_fd = -1; new_client_id = 0; @@ -509,7 +522,7 @@ struct Daemon { } bool reannounce_environments() __attribute_warn_unused_result__; - int answer_client_requests(); + void answer_client_requests(); bool handle_transfer_env(Client *client, Msg *msg) __attribute_warn_unused_result__; bool handle_transfer_env_done(Client *client); bool handle_get_native_env(Client *client, GetNativeEnvMsg *msg) __attribute_warn_unused_result__; @@ -533,7 +546,7 @@ struct Daemon { string dump_internals() const; string determine_nodename(); void determine_system(); - bool maybe_stats(bool force = false); + bool maybe_stats(bool force_check = false); bool send_scheduler(const Msg &msg) __attribute_warn_unused_result__; void close_scheduler(); bool reconnect(); @@ -602,7 +615,8 @@ bool Daemon::setup_listen_fds() myaddr.sun_family = AF_UNIX; - mode_t old_umask = -1U; + bool reset_umask = false; + mode_t old_umask = 0; if (getenv("ICECC_TEST_SOCKET") == NULL) { #ifdef HAVE_LIBCAP_NG @@ -617,10 +631,11 @@ bool Daemon::setup_listen_fds() if(default_socket.length() > sizeof(myaddr.sun_path) - 1) { log_error() << "default socket path too long for sun_path" << endl; } - if (-1 == unlink(myaddr.sun_path)){ + if (-1 == unlink(myaddr.sun_path) && errno != ENOENT){ log_perror("unlink failed") << "\t" << myaddr.sun_path << endl; } old_umask = umask(0); + reset_umask = true; } else { // Started by user. 
if( getenv( "HOME" )) { string socket_path = getenv("HOME"); @@ -630,7 +645,7 @@ bool Daemon::setup_listen_fds() if(socket_path.length() > sizeof(myaddr.sun_path) - 1) { log_error() << "$HOME/.iceccd.socket path too long for sun_path" << endl; } - if (-1 == unlink(myaddr.sun_path)){ + if (-1 == unlink(myaddr.sun_path) && errno != ENOENT){ log_perror("unlink failed") << "\t" << myaddr.sun_path << endl; } } else { @@ -645,7 +660,7 @@ bool Daemon::setup_listen_fds() if(test_socket.length() > sizeof(myaddr.sun_path) - 1) { log_error() << "$ICECC_TEST_SOCKET path too long for sun_path" << endl; } - if (-1 == unlink(myaddr.sun_path)){ + if (-1 == unlink(myaddr.sun_path) && errno != ENOENT){ log_perror("unlink failed") << "\t" << myaddr.sun_path << endl; } } @@ -653,14 +668,14 @@ bool Daemon::setup_listen_fds() if (bind(unix_listen_fd, (struct sockaddr*)&myaddr, sizeof(myaddr)) < 0) { log_perror("bind()"); - if (old_umask != -1U) { + if (reset_umask) { umask(old_umask); } return false; } - if (old_umask != -1U) { + if (reset_umask) { umask(old_umask); } @@ -745,24 +760,25 @@ void Daemon::close_scheduler() delete discover; discover = 0; next_scheduler_connect = time(0) + 20 + (rand() & 31); + static bool fast_reconnect = getenv( "ICECC_TESTS" ) != NULL; + if( fast_reconnect ) + next_scheduler_connect = time(0) + 3; } -bool Daemon::maybe_stats(bool send_ping) +bool Daemon::maybe_stats(bool force_check) { struct timeval now; gettimeofday(&now, 0); time_t diff_sent = (now.tv_sec - last_stat.tv_sec) * 1000 + (now.tv_usec - last_stat.tv_usec) / 1000; - if (diff_sent >= max_scheduler_pong * 1000) { + if (diff_sent >= max_scheduler_pong * 1000 || force_check) { StatsMsg msg; unsigned int memory_fillgrade; unsigned long idleLoad = 0; unsigned long niceLoad = 0; - if (!fill_stats(idleLoad, niceLoad, memory_fillgrade, &msg, clients.active_processes)) { - return false; - } + fill_stats(idleLoad, niceLoad, memory_fillgrade, &msg, clients.active_processes); time_t diff_stat = (now.tv_sec - last_stat.tv_sec) * 1000 + (now.tv_usec - last_stat.tv_usec) / 1000; last_stat = now; @@ -789,40 +805,34 @@ bool Daemon::maybe_stats(bool send_ping) icecream_usage.tv_usec = ru.ru_utime.tv_usec; } - int idle_average = icecream_load; + unsigned int idle_average = icecream_load; if (diff_sent) { idle_average = icecream_load * 1000 / diff_sent; } - if (idle_average > 1000) { - idle_average = 1000; - } - - msg.load = ((700 * (1000 - idle_average)) + (300 * memory_fillgrade)) / 1000; + if (idle_average > 1000) + idle_average = 1000; - if (memory_fillgrade > 600) { - msg.load = 1000; - } - - if (idle_average < 100) { - msg.load = 1000; - } + msg.load = std::max((1000 - idle_average), memory_fillgrade); #ifdef HAVE_SYS_VFS_H struct statfs buf; int ret = statfs(envbasedir.c_str(), &buf); - if (!ret && long(buf.f_bavail) < ((long(max_kids + 1 - current_kids) * 4 * 1024 * 1024) / buf.f_bsize)) { + // Require at least 25MiB of free disk space per build. 
+ if (!ret && long(buf.f_bavail) < ((long(max_kids + 1 - current_kids) * 25 * 1024 * 1024) / buf.f_bsize)) { msg.load = 1000; } #endif // Matz got in the urine that not all CPUs are always feed - mem_limit = std::max(int(msg.freeMem / std::min(std::max(max_kids, 1U), 4U)), int(100U)); + mem_limit = std::max(int(msg.freeMem / std::min(std::max(max_kids, 1U), 4U)), min_mem_limit); - if (abs(int(msg.load) - current_load) >= 100 || send_ping) { + if (abs(int(msg.load) - current_load) >= 100 + || (msg.load == 1000 && current_load != 1000) + || (msg.load != 1000 && current_load == 1000)) { if (!send_scheduler(msg)) { return false; } @@ -882,14 +892,13 @@ string Daemon::dump_internals() const unsigned long idleLoad = 0; unsigned long niceLoad = 0; - if (fill_stats(idleLoad, niceLoad, memory_fillgrade, &msg, clients.active_processes)) { - result += " cpu: " + toString(idleLoad) + " idle, " - + toString(niceLoad) + " nice\n"; - result += " load: " + toString(msg.loadAvg1 / 1000.) + ", icecream_load: " - + toString(icecream_load) + "\n"; - result += " memory: " + toString(memory_fillgrade) - + " (free: " + toString(msg.freeMem) + ")\n"; - } + fill_stats(idleLoad, niceLoad, memory_fillgrade, &msg, clients.active_processes); + result += " cpu: " + toString(idleLoad) + " idle, " + + toString(niceLoad) + " nice\n"; + result += " load: " + toString(msg.loadAvg1 / 1000.) + ", icecream_load: " + + toString(icecream_load) + "\n"; + result += " memory: " + toString(memory_fillgrade) + + " (free: " + toString(msg.freeMem) + ")\n"; return result; } @@ -907,8 +916,8 @@ int Daemon::scheduler_use_cs(UseCSMsg *msg) << " " << c << " " << msg->hostname << " " << remote_name << endl; if (!c) { - if (send_scheduler(JobDoneMsg(msg->job_id, 107, JobDoneMsg::FROM_SUBMITTER))) { - return 1; + if (send_scheduler(JobDoneMsg(msg->job_id, 107, JobDoneMsg::FROM_SUBMITTER, clients.size()))) { + return 0; } return 1; @@ -938,12 +947,12 @@ int Daemon::scheduler_use_cs(UseCSMsg *msg) int Daemon::scheduler_no_cs(NoCSMsg *msg) { Client *c = clients.find_by_client_id(msg->client_id); - trace() << "handle_use_cs " << msg->job_id << " " << msg->client_id + trace() << "handle_no_cs " << msg->job_id << " " << msg->client_id << " " << c << " " << endl; if (!c) { - if (send_scheduler(JobDoneMsg(msg->job_id, 107, JobDoneMsg::FROM_SUBMITTER))) { - return 1; + if (send_scheduler(JobDoneMsg(msg->job_id, 107, JobDoneMsg::FROM_SUBMITTER, clients.size()))) { + return 0; } return 1; @@ -1038,8 +1047,7 @@ bool Daemon::handle_transfer_env_done(Client *client) bool r = reannounce_environments(); // do that before the file compiles - // we do that here so we're not given out in case of full discs - if (!maybe_stats(true)) { + if (!maybe_stats(true)) { // update stats in case our disk is too full to accept more jobs r = false; } @@ -1222,10 +1230,18 @@ bool Daemon::create_env_finished(string env_key) trace() << "cache_size = " << cache_size << endl; if (!installed_size) { - for (Clients::const_iterator it = clients.begin(); it != clients.end(); ++it) { - if (it->second->pending_create_env == env_key) { - it->second->channel->send_msg(EndMsg()); - handle_end(it->second, 121); + bool repeat = true; + while(repeat) { + repeat = false; + for (Clients::const_iterator it = clients.begin(); it != clients.end(); ++it) { + if (it->second->pending_create_env == env_key) { + it->second->channel->send_msg(EndMsg()); + handle_end(it->second, 121); + // The handle_end call invalidates our iterator, so break out of the loop, + // but try again just in case, until 
there's no match. + repeat = true; + break; + } } } return false; @@ -1259,6 +1275,9 @@ bool Daemon::handle_job_done(Client *cl, JobDoneMsg *m) assert(msg->job_id == cl->job_id); cl->job_id = 0; // the scheduler doesn't have it anymore + + msg->client_count = clients.size(); + return send_scheduler(*msg); } @@ -1318,7 +1337,7 @@ void Daemon::handle_old_request() int sock = -1; pid_t pid = -1; - trace() << "requests--" << job->jobID() << endl; + trace() << "request for job " << job->jobID() << endl; string envforjob = job->targetPlatform() + "/" + job->environmentVersion(); envs_last_use[envforjob] = time(NULL); @@ -1331,7 +1350,7 @@ void Daemon::handle_old_request() client->pipe_to_child = sock; client->child_pid = pid; - if (!send_scheduler(JobBeginMsg(job->jobID()))) { + if (!send_scheduler(JobBeginMsg(job->jobID(), clients.size()))) { log_info() << "failed sending scheduler about " << job->jobID() << endl; } } else { @@ -1351,7 +1370,7 @@ bool Daemon::handle_compile_done(Client *client) assert(client->child_pid > 0); assert(client->pipe_to_child >= 0); - JobDoneMsg *msg = new JobDoneMsg(client->job->jobID(), -1, JobDoneMsg::FROM_SERVER); + JobDoneMsg *msg = new JobDoneMsg(client->job->jobID(), -1, JobDoneMsg::FROM_SERVER, clients.size()); assert(msg); assert(current_kids > 0); current_kids--; @@ -1368,7 +1387,6 @@ bool Daemon::handle_compile_done(Client *client) msg->user_msec = job_stat[JobStatistics::user_msec]; msg->sys_msec = job_stat[JobStatistics::sys_msec]; msg->pfaults = job_stat[JobStatistics::sys_pfaults]; - end_status = job_stat[JobStatistics::exit_code]; } close(client->pipe_to_child); @@ -1392,7 +1410,7 @@ bool Daemon::handle_compile_file(Client *client, Msg *msg) if (client->status == Client::CLIENTWORK) { assert(job->environmentVersion() == "__client"); - if (!send_scheduler(JobBeginMsg(job->jobID()))) { + if (!send_scheduler(JobBeginMsg(job->jobID(), clients.size()))) { trace() << "can't reach scheduler to tell him about compile file job " << job->jobID() << endl; return false; @@ -1470,17 +1488,21 @@ void Daemon::handle_end(Client *client, int exitcode) if (scheduler && client->status != Client::WAITFORCHILD) { int job_id = client->job_id; + bool use_client_id = false; if (client->status == Client::TOCOMPILE) { job_id = client->job->jobID(); } if (client->status == Client::WAITFORCS) { - job_id = client->client_id; // it's all we have - exitcode = CLIENT_WAS_WAITING_FOR_CS; // this is the message + // We don't know the job id, because we haven't received a reply + // from the scheduler yet. Use client_id to identify the job, + // the scheduler will use it for matching. + use_client_id = true; + assert( client->client_id > 0 ); } - if (job_id > 0) { + if (job_id > 0 || use_client_id) { JobDoneMsg::from_type flag = JobDoneMsg::FROM_SUBMITTER; switch (client->status) { @@ -1506,7 +1528,11 @@ void Daemon::handle_end(Client *client, int exitcode) trace() << "scheduler->send_msg( JobDoneMsg( " << client->dump() << ", " << exitcode << "))\n"; - if (!send_scheduler(JobDoneMsg(job_id, exitcode, flag))) { + JobDoneMsg msg(job_id, exitcode, flag, clients.size()); + if( use_client_id ) { + msg.set_unknown_job_client_id( client->client_id ); + } + if (!send_scheduler(msg)) { trace() << "failed to reach scheduler for remote job done msg!" 
<< endl; } } else if (client->status == Client::CLIENTWORK) { @@ -1565,6 +1591,8 @@ bool Daemon::handle_get_cs(Client *client, Msg *msg) return true; } + umsg->client_count = clients.size(); + return send_scheduler(*umsg); } @@ -1637,7 +1665,7 @@ bool Daemon::handle_activity(Client *client) { assert(client->status != Client::TOCOMPILE); - Msg *msg = client->channel->get_msg(); + Msg *msg = client->channel->get_msg(0, true); if (!msg) { handle_end(client, 118); @@ -1696,7 +1724,7 @@ bool Daemon::handle_activity(Client *client) return ret; } -int Daemon::answer_client_requests() +void Daemon::answer_client_requests() { #ifdef ICECC_DEBUG @@ -1802,7 +1830,14 @@ int Daemon::answer_client_requests() if (ret < 0 && errno != EINTR) { log_perror("select"); - return 5; + close_scheduler(); + return; + } + // Reset debug if needed, but only if we aren't waiting for any child processes to finish, + // otherwise their debug output could end up reset in the middle (and flush log marks used + // by tests could be written out before debug output from children). + if( current_kids == 0 ) { + reset_debug_if_needed(); } if (ret > 0) { @@ -1810,13 +1845,13 @@ int Daemon::answer_client_requests() if (scheduler && FD_ISSET(scheduler->fd, &listen_set)) { while (!scheduler->read_a_bit() || scheduler->has_msg()) { - Msg *msg = scheduler->get_msg(); + Msg *msg = scheduler->get_msg(0, true); if (!msg) { log_error() << "scheduler closed connection" << endl; close_scheduler(); clear_children(); - return 1; + return; } ret = 0; @@ -1849,7 +1884,8 @@ int Daemon::answer_client_requests() delete msg; if (ret) { - return ret; + close_scheduler(); + return; } } } @@ -1875,13 +1911,13 @@ int Daemon::answer_client_requests() if (acc_fd == -1 && errno != EINTR) { log_perror("accept failed:"); - return EXIT_CONNECT_FAILED; + return; } MsgChannel *c = Service::createChannel(acc_fd, &cli_addr, cli_len); if (!c) { - return 0; + return; } trace() << "accepted " << c->fd << " " << c->name << endl; @@ -1918,7 +1954,7 @@ int Daemon::answer_client_requests() max_fd--; if (!handle_compile_done(client)) { - return 1; + return; } } @@ -1956,12 +1992,10 @@ int Daemon::answer_client_requests() if (had_scheduler && !scheduler) { clear_children(); - return 2; + return; } } - - return 0; } bool Daemon::reconnect() @@ -1971,7 +2005,7 @@ bool Daemon::reconnect() } if (!discover && next_scheduler_connect > time(0)) { - trace() << "timeout.." << endl; + trace() << "Delaying reconnect." << endl; return false; } @@ -1985,7 +2019,7 @@ bool Daemon::reconnect() } if (!scheduler) { - log_warning() << "scheduler not yet found." << endl; + log_warning() << "scheduler not yet found/selected." << endl; return false; } @@ -2017,13 +2051,7 @@ int Daemon::working_loop() { for (;;) { reconnect(); - - int ret = answer_client_requests(); - - if (ret) { - trace() << "answer_client_requests returned " << ret << endl; - close_scheduler(); - } + answer_client_requests(); if (exit_main_loop) { close_scheduler(); @@ -2132,14 +2160,8 @@ int main(int argc, char **argv) break; case 'v': - if (debug_level & Warning) - if (debug_level & Info) { // for second call - debug_level |= Debug; - } else { - debug_level |= Info; - } - else { - debug_level |= Warning; + if (debug_level < MaxVerboseLevel) { + debug_level++; } break; @@ -2227,7 +2249,7 @@ int main(int argc, char **argv) } if (d.warn_icecc_user_errno != 0) { - log_errno("Error: no icecc user on system. Falling back to nobody.", d.warn_icecc_user_errno); + log_errno("No icecc user on system. 
Falling back to nobody.", d.warn_icecc_user_errno); } umask(022); diff --git a/daemon/ncpus.c b/daemon/ncpus.c index 91c1fa56c..d1c7bd7cf 100644 --- a/daemon/ncpus.c +++ b/daemon/ncpus.c @@ -64,36 +64,6 @@ int dcc_ncpus(int *ncpus) } -#elif defined(__VOS__) - -#ifdef __GNUC__ -#define $shortmap -#endif - -#include - -extern void s$get_module_info(char_varying *module_name, void *mip, - short int *code); - -int dcc_ncpus(int *ncpus) -{ - short int code; - module_info mi; - char_varying(66) module_name; - - strcpy_vstr_nstr(&module_name, ""); - mi.version = MODULE_INFO_VERSION_1; - s$get_module_info((char_varying *)&module_name, (void *)&mi, &code); - - if (code != 0) { - *ncpus = 1; /* safe guess... */ - } else { - *ncpus = mi.n_user_cpus; - } - - return 0; -} - #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__) || defined(__bsdi__) || defined(__DragonFly__) /* http://www.FreeBSD.org/cgi/man.cgi?query=sysctl&sektion=3&manpath=FreeBSD+4.6-stable diff --git a/daemon/serve.cpp b/daemon/serve.cpp index b8943b4ef..527aeb68c 100644 --- a/daemon/serve.cpp +++ b/daemon/serve.cpp @@ -158,7 +158,7 @@ int handle_connection(const string &basedir, CompileJob *job, return pid; } - reset_debug(0); + reset_debug(); if ((-1 == close(socket[0])) && (errno != EBADF)){ log_perror("close failed"); } @@ -173,17 +173,18 @@ int handle_connection(const string &basedir, CompileJob *job, << endl; } - Msg *msg = 0; // The current read message string tmp_path, obj_file, dwo_file; + int exit_code = 0; try { if (job->environmentVersion().size()) { string dirname = basedir + "/target=" + job->targetPlatform() + "/" + job->environmentVersion(); - if (::access(string(dirname + "/usr/bin/as").c_str(), X_OK)) { - error_client(client, dirname + "/usr/bin/as is not executable"); + if (::access(string(dirname + "/usr/bin/as").c_str(), X_OK) < 0) { + error_client(client, dirname + "/usr/bin/as is not executable, installed environment removed?"); log_error() << "I don't have environment " << job->environmentVersion() << "(" << job->targetPlatform() << ") " << job->jobID() << endl; - throw myexception(EXIT_DISTCC_FAILED); // the scheduler didn't listen to us! + // The scheduler didn't listen to us, or maybe something has removed the files. 
+ throw myexception(EXIT_DISTCC_FAILED); } chdir_to_environment(client, dirname, user_uid, user_gid); @@ -193,7 +194,7 @@ int handle_connection(const string &basedir, CompileJob *job, throw myexception(EXIT_DISTCC_FAILED); } - if (::access(_PATH_TMP + 1, W_OK)) { + if (::access(_PATH_TMP + 1, W_OK) < 0) { error_client(client, "can't write to " _PATH_TMP); log_error() << "can't write into " << _PATH_TMP << " " << strerror(errno) << endl; throw myexception(-1); @@ -208,7 +209,7 @@ int handle_connection(const string &basedir, CompileJob *job, char *tmp_output = 0; char prefix_output[32]; // 20 for 2^64 + 6 for "icecc-" + 1 for trailing NULL - sprintf(prefix_output, "icecc-%d", job_id); + sprintf(prefix_output, "icecc-%u", job_id); if (job->dwarfFissionEnabled() && (ret = dcc_make_tmpdir(&tmp_output)) == 0) { tmp_path = tmp_output; @@ -263,7 +264,7 @@ int handle_connection(const string &basedir, CompileJob *job, ret = work_it(*job, job_stat, client, rmsg, tmp_path, job_working_dir, relative_file_path, mem_limit, client->fd); } - else if ((ret = dcc_make_tmpnam(prefix_output, ".o", &tmp_output, 0)) == 0) { + else if (!job->dwarfFissionEnabled() && (ret = dcc_make_tmpnam(prefix_output, ".o", &tmp_output, 0)) == 0) { obj_file = tmp_output; free(tmp_output); string build_path = obj_file.substr(0, obj_file.find_last_of('/')); @@ -275,6 +276,11 @@ int handle_connection(const string &basedir, CompileJob *job, if (ret) { if (ret == EXIT_OUT_OF_MEMORY) { // we catch that as special case rmsg.was_out_of_memory = true; + } else if (ret == EXIT_IO_ERROR) { + // This was probably running out of disk space. + // Fake that as running out of memory, since it's in practice + // a very similar problem. + rmsg.was_out_of_memory = true; } else { throw myexception(ret); } @@ -310,17 +316,17 @@ int handle_connection(const string &basedir, CompileJob *job, throw myexception(rmsg.status); - } catch (myexception e) { + } catch (const myexception& e) { delete client; client = 0; if (!obj_file.empty()) { - if (-1 == unlink(obj_file.c_str())){ + if (-1 == unlink(obj_file.c_str()) && errno != ENOENT){ log_perror("unlink failure") << "\t" << obj_file << endl; } } if (!dwo_file.empty()) { - if (-1 == unlink(dwo_file.c_str())){ + if (-1 == unlink(dwo_file.c_str()) && errno != ENOENT){ log_perror("unlink failure") << "\t" << dwo_file << endl; } } @@ -328,9 +334,9 @@ int handle_connection(const string &basedir, CompileJob *job, rmpath(tmp_path.c_str()); } - delete msg; delete job; - _exit(e.exitcode()); + exit_code = e.exitcode(); } + _exit(exit_code); } diff --git a/daemon/workit.cpp b/daemon/workit.cpp index 21caf2da1..debf9da24 100644 --- a/daemon/workit.cpp +++ b/daemon/workit.cpp @@ -47,6 +47,10 @@ #endif #include +#ifdef HAVE_SYS_VFS_H +#include +#endif + #if defined(__FreeBSD__) || defined(__DragonFly__) || defined(__APPLE__) #ifndef RUSAGE_SELF #define RUSAGE_SELF (0) @@ -106,6 +110,16 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR list.push_back("-gsplit-dwarf"); } + trace() << "remote compile for file " << j.inputFile() << endl; + + string argstxt; + for (std::list::const_iterator it = list.begin(); + it != list.end(); ++it) { + argstxt += ' '; + argstxt += *it; + } + trace() << "remote compile arguments:" << argstxt << endl; + int sock_err[2]; int sock_out[2]; int sock_in[2]; @@ -179,21 +193,36 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR } #ifdef RLIMIT_AS - struct rlimit rlim; - if (getrlimit(RLIMIT_AS, &rlim)) { - error_client(client, 
"getrlimit failed."); - log_perror("getrlimit"); - } +// Sanitizers use huge amounts of virtual memory and the setrlimit() call below +// may lead to the process getting killed at any moment without any warning +// or message. Both gcc's and clang's macros are unreliable (no way to detect -fsanitize=leak, +// for example), but hopefully with the configure check this is good enough. +#ifndef SANITIZER_USED +#ifdef __SANITIZE_ADDRESS__ +#define SANITIZER_USED +#endif +#if defined(__has_feature) +#if __has_feature(address_sanitizer) +#define SANITIZER_USED +#endif +#endif +#endif - rlim.rlim_cur = mem_limit * 1024 * 1024; - rlim.rlim_max = mem_limit * 1024 * 1024; +#ifndef SANITIZER_USED + struct rlimit rlim; + + rlim_t lim = mem_limit * 1024 * 1024; + rlim.rlim_cur = lim; + rlim.rlim_max = lim; if (setrlimit(RLIMIT_AS, &rlim)) { error_client(client, "setrlimit failed."); log_perror("setrlimit"); + } else { + log_info() << "Compile job memory limit set to " << mem_limit << " megabytes" << endl; } - +#endif #endif int argc = list.size(); @@ -221,7 +250,18 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR } argv[i++] = strdup("-x"); - argv[i++] = strdup((j.language() == CompileJob::Lang_CXX) ? "c++" : "c"); + if (j.language() == CompileJob::Lang_C) { + argv[i++] = strdup("c"); + } else if (j.language() == CompileJob::Lang_CXX) { + argv[i++] = strdup("c++"); + } else if (j.language() == CompileJob::Lang_OBJC) { + argv[i++] = strdup("objective-c"); + } else if (j.language() == CompileJob::Lang_OBJCXX) { + argv[i++] = strdup("objective-c++"); + } else { + error_client(client, "language not supported"); + log_perror("language not supported"); + } if( clang ) { // gcc seems to handle setting main file name and working directory fine @@ -290,6 +330,15 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR argv[i] = 0; assert(i <= argc); + argstxt.clear(); + for (int pos = 1; + pos < i; + ++pos ) { + argstxt += ' '; + argstxt += argv[pos]; + } + trace() << "final arguments:" << argstxt << endl; + close_debug(); if ((-1 == close(sock_out[0])) && (errno != EBADF)){ @@ -344,8 +393,8 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR #ifdef ICECC_DEBUG for (int f = STDERR_FILENO + 1; f < 4096; ++f) { - long flags; - assert((flags = fcntl(f, F_GETFD, 0)) < 0 || (flags & FD_CLOEXEC)); + long flags = fcntl(f, F_GETFD, 0); + assert(flags < 0 || (flags & FD_CLOEXEC)); } #endif @@ -412,7 +461,7 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR for (;;) { if (client_fd >= 0 && !fcmsg) { - if (Msg *msg = client->get_msg(0)) { + if (Msg *msg = client->get_msg(0, true)) { if (input_complete) { rmsg.err.append("client cancelled\n"); return_value = EXIT_CLIENT_KILLED; @@ -441,6 +490,7 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR job_stat[JobStatistics::in_compressed] += fcmsg->compressed; } else { log_error() << "protocol error while reading preprocessed file" << endl; + input_complete = true; return_value = EXIT_IO_ERROR; client_fd = -1; kill(pid, SIGTERM); @@ -451,6 +501,7 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR } } else if (client->at_eof()) { log_error() << "unexpected EOF while reading preprocessed file" << endl; + input_complete = true; return_value = EXIT_IO_ERROR; client_fd = -1; kill(pid, SIGTERM); @@ -642,15 +693,30 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR 
if (((mem_used * 100) > (85 * mem_limit * 1024)) || (rmsg.err.find("memory exhausted") != string::npos) - || (rmsg.err.find("out of memory allocating") != string::npos) + || (rmsg.err.find("out of memory") != string::npos) || (rmsg.err.find("annot allocate memory") != string::npos) || (rmsg.err.find("failed to map segment from shared object") != string::npos) || (rmsg.err.find("Assertion `NewElts && \"Out of memory\"' failed") != string::npos) || (rmsg.err.find("terminate called after throwing an instance of 'std::bad_alloc'") != string::npos) || (rmsg.err.find("llvm::MallocSlabAllocator::Allocate") != string::npos)) { // the relation between ulimit and memory used is pretty thin ;( + log_warning() << "Remote compilation failed, presumably because of running out of memory (exit code " + << shell_exit_status(status) << ")" << endl; return EXIT_OUT_OF_MEMORY; } + +#ifdef HAVE_SYS_VFS_H + struct statfs buf; + int ret = statfs( "/", &buf); + // If there's less than 10MiB of disk space free, we're probably running out of disk space. + if ((ret == 0 && long(buf.f_bavail) < ((10 * 1024 * 1024) / buf.f_bsize)) + || rmsg.err.find("o space left on device") != string::npos) { + log_warning() << "Remote compilation failed, presumably because of running out of disk space (exit code " + << shell_exit_status(status) << ")" << endl; + return EXIT_IO_ERROR; + } +#endif + } if (WIFEXITED(status)) { @@ -666,6 +732,13 @@ int work_it(CompileJob &j, unsigned int job_stat[], MsgChannel *client, CompileR job_stat[JobStatistics::sys_msec] = (ru.ru_stime.tv_sec * 1000) + (ru.ru_stime.tv_usec / 1000); job_stat[JobStatistics::sys_pfaults] = ru.ru_majflt + ru.ru_nswap + ru.ru_minflt; + if(rmsg.status != 0) { + log_warning() << "Remote compilation exited with exit code " << shell_exit_status(status) << endl; + } else { + log_info() << "Remote compilation completed with exit code " << shell_exit_status(status) << endl; + } + } else { + log_warning() << "Remote compilation aborted with exit code " << shell_exit_status(status) << endl; } return return_value; diff --git a/doc/man-icecc-create-env.1.xml b/doc/man-icecc-create-env.1.xml index acb248cfb..3e03c7350 100644 --- a/doc/man-icecc-create-env.1.xml +++ b/doc/man-icecc-create-env.1.xml @@ -34,12 +34,7 @@ icecc-create-env ---gcc gcc-path g++-path ---addfile file - - -icecc-create-env ---clang clang-path compiler-wrapper +compiler-binary --addfile file @@ -49,8 +44,6 @@ &icecc-create-env; is an Icecream helper that creates a new .tar.gz archive with all the files (compiler, tools and libraries) needed to setup a build environment. -Specifying or as first -argument decides which compiler (&gcc; or &clang;) is copied in the archive. The resulting archive has a random file name like ddaea39ca1a7c88522b185eca04da2d8.tar.gz, which can then be renamed. See icecream(7) for more information on using the environment @@ -64,27 +57,6 @@ called automatically for the native compiler used whenever necessary. - - -gcc-path g++-path -&gcc; is used in the archive. -gcc-path -and g++-path represent respectively the paths to the -gcc and g++ executables. - - - - - -clang-path compiler-wrapper -&clang; is used in the archive. -clang-path represents the path to the -clang executable; compiler-wrapper -is the path to a compiler wrapper, usually /usr/lib/icecc/compilerwrapper, -used with old Icecream clients which hardcode /usr/bin/gcc -and /usr/bin/g++ and thus would not use &clang;. 
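// The new disk-space heuristic above treats a failed remote compile as an I/O error
// when statfs("/") reports less than 10MiB free, or when the compiler output mentions
// "No space left on device". A minimal sketch of the same free-space check, assuming a
// Linux-style <sys/vfs.h>; the function name is illustrative, not taken from icecream:
#include <sys/vfs.h>
#include <cstdio>

static bool rootFilesystemAlmostFull()
{
    struct statfs buf;
    if (statfs("/", &buf) != 0)
        return false;                       // cannot tell, so do not guess
    // f_bavail is counted in blocks of f_bsize bytes each.
    const long long freeBytes = (long long)buf.f_bavail * buf.f_bsize;
    return freeBytes < 10LL * 1024 * 1024;  // under 10MiB free
}

int main()
{
    std::printf("low on disk space: %s\n", rootFilesystemAlmostFull() ? "yes" : "no");
}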
- - file diff --git a/doc/man-icecream.7.xml b/doc/man-icecream.7.xml index f32ce9f91..96b2b9125 100644 --- a/doc/man-icecream.7.xml +++ b/doc/man-icecream.7.xml @@ -189,12 +189,10 @@ for the same host architecture: Create a tarball file for each toolchain that you want to use with - icecream. The /usr/lib/icecc/icecc-create-env script can be used to + icecream. The icecc-create-env script can be used to create the tarball file for each toolchain, for example: - /usr/lib/icecc/icecc-create-env --gcc /work/toolchain1/bin/arm-eabi-gcc - /work/toolchain1/bin/arm-eabi-g++ - /usr/lib/icecc/icecc-create-env --gcc /work/toolchain2/bin/arm-linux-androideabi-gcc - /work/toolchain2/bin/arm-linux-androideabi-gcc. + icecc-create-env /work/toolchain1/bin/arm-eabi-gcc + icecc-create-env /work/toolchain2/bin/arm-linux-androideabi-gcc. Set ICECC_VERSION to point to the native tarball file and for each @@ -379,7 +377,7 @@ has to wait on the slow machine. Keep that in mind. If the monitor cannot find the scheduler, use -USE_SCHEDULER=host icemon. +ICECC_SCHEDULER=host icemon. diff --git a/scheduler/compileserver.cpp b/scheduler/compileserver.cpp index 6c003bb4b..5d877d0f5 100644 --- a/scheduler/compileserver.cpp +++ b/scheduler/compileserver.cpp @@ -48,10 +48,12 @@ CompileServer::CompileServer(const int fd, struct sockaddr *_addr, const socklen , m_maxJobs(0) , m_noRemote(false) , m_jobList() - , m_submittedJobsCount(0) , m_state(CONNECTED) , m_type(UNKNOWN) , m_chrootPossible(false) + , m_clientCount(0) + , m_submittedJobsCount(0) + , m_lastPickId(0) , m_compilerVersions() , m_lastCompiledJobs() , m_lastRequestedJobs() @@ -257,6 +259,7 @@ list CompileServer::jobList() const void CompileServer::appendJob(Job *job) { + m_lastPickId = job->id(); m_jobList.push_back(job); } @@ -265,19 +268,9 @@ void CompileServer::removeJob(Job *job) m_jobList.remove(job); } -int CompileServer::submittedJobsCount() const -{ - return m_submittedJobsCount; -} - -void CompileServer::submittedJobsIncrement() -{ - m_submittedJobsCount++; -} - -void CompileServer::submittedJobsDecrement() +unsigned int CompileServer::lastPickedId() { - m_submittedJobsCount--; + return m_lastPickId; } CompileServer::State CompileServer::state() const @@ -310,6 +303,31 @@ void CompileServer::setChrootPossible(const bool possible) m_chrootPossible = possible; } +int CompileServer::clientCount() const +{ + return m_clientCount; +} + +void CompileServer::setClientCount( int clientCount ) +{ + m_clientCount = clientCount; +} + +int CompileServer::submittedJobsCount() const +{ + return m_submittedJobsCount; +} + +void CompileServer::submittedJobsIncrement() +{ + m_submittedJobsCount++; +} + +void CompileServer::submittedJobsDecrement() +{ + m_submittedJobsCount--; +} + Environments CompileServer::compilerVersions() const { return m_compilerVersions; diff --git a/scheduler/compileserver.h b/scheduler/compileserver.h index 5d43bc9d2..249a5ebd2 100644 --- a/scheduler/compileserver.h +++ b/scheduler/compileserver.h @@ -89,10 +89,7 @@ class CompileServer : public MsgChannel list jobList() const; void appendJob(Job *job); void removeJob(Job *job); - - int submittedJobsCount() const; - void submittedJobsIncrement(); - void submittedJobsDecrement(); + unsigned int lastPickedId(); State state() const; void setState(const State state); @@ -103,6 +100,12 @@ class CompileServer : public MsgChannel bool chrootPossible() const; void setChrootPossible(const bool possible); + int clientCount() const; + void setClientCount( int clientCount ); + int submittedJobsCount() 
const; + void submittedJobsIncrement(); + void submittedJobsDecrement(); + Environments compilerVersions() const; void setCompilerVersions(const Environments &environments); @@ -155,10 +158,12 @@ class CompileServer : public MsgChannel int m_maxJobs; bool m_noRemote; list m_jobList; - int m_submittedJobsCount; State m_state; Type m_type; bool m_chrootPossible; + int m_clientCount; // number of client connections the daemon has + int m_submittedJobsCount; + unsigned int m_lastPickId; Environments m_compilerVersions; // Available compilers diff --git a/scheduler/job.cpp b/scheduler/job.cpp index a5c3a82b5..88a7e6a00 100644 --- a/scheduler/job.cpp +++ b/scheduler/job.cpp @@ -58,11 +58,6 @@ unsigned int Job::id() const return m_id; } -void Job::setId(const unsigned int id) -{ - m_id = id; -} - unsigned int Job::localClientId() const { return m_localClientId; diff --git a/scheduler/job.h b/scheduler/job.h index 9cab679a5..f89035a85 100644 --- a/scheduler/job.h +++ b/scheduler/job.h @@ -45,7 +45,6 @@ class Job ~Job(); unsigned int id() const; - void setId(const unsigned int id); unsigned int localClientId() const; void setLocalClientId(const unsigned int id); @@ -95,7 +94,7 @@ class Job void setMinimalHostVersion( int version ); private: - unsigned int m_id; + const unsigned int m_id; unsigned int m_localClientId; State m_state; CompileServer *m_server; // on which server we build diff --git a/scheduler/scheduler.cpp b/scheduler/scheduler.cpp old mode 100755 new mode 100644 index 524e8c4fe..8941aa642 --- a/scheduler/scheduler.cpp +++ b/scheduler/scheduler.cpp @@ -52,11 +52,13 @@ #include "../services/comm.h" #include "../services/logging.h" #include "../services/job.h" +#include "../services/util.h" #include "config.h" #include "compileserver.h" #include "job.h" +// Values 0 to 3. #define DEBUG_SCHEDULER 0 /* TODO: @@ -120,8 +122,7 @@ static list toanswer; static list all_job_stats; static JobStat cum_job_stats; -static float server_speed(CompileServer *cs, Job *job = 0); -static void broadcast_scheduler_version(const char* netname); +static float server_speed(CompileServer *cs, Job *job = 0, bool blockDebug = false); /* Searches the queue for JOB and removes it. Returns true if something was deleted. */ @@ -220,7 +221,7 @@ static void add_job_stats(Job *job, JobDoneMsg *msg) << " " << st.outputSize() << " " << msg->out_uncompressed << " " << job->server()->nodeName() << " " << float(msg->out_uncompressed) / st.compileTimeUser() << " " - << server_speed(job->server()) << endl; + << server_speed(job->server(), NULL, true) << endl; } #endif } @@ -245,8 +246,11 @@ static void notify_monitors(Msg *m) delete m; } -static float server_speed(CompileServer *cs, Job *job) +static float server_speed(CompileServer *cs, Job *job, bool blockDebug) { +#if DEBUG_SCHEDULER <= 2 + (void)blockDebug; +#endif if (cs->lastCompiledJobs().size() == 0 || cs->cumCompiled().compileTimeUser() == 0) { return 0; } else { @@ -256,21 +260,57 @@ static float server_speed(CompileServer *cs, Job *job) // we only care for the load if we're about to add a job to it if (job) { if (job->submitter() == cs) { - /* The submitter of a job gets more speed if it's capable of handling its requests on its own. - So if he is equally fast to the rest of the farm it will be preferred to chose him - to compile the job. Then this can be done locally without needing the preprocessor. 
- However if there are more requests than the number of jobs the submitter can handle, - it is assumed the submitter is doing a massively parallel build, in which case it is - better not to build on the submitter and let it do other work (such as preprocessing - output for other nodes) that can be done only locally. */ - if (cs->submittedJobsCount() <= cs->maxJobs()) { + int clientCount = cs->clientCount(); + if( clientCount == 0 ) { + // Older client/daemon that doesn't send client count. Use the number of jobs + // that we've already been told about as the fallback value (it will sometimes + // be an underestimate). + clientCount = cs->submittedJobsCount(); + } + if (clientCount > cs->maxJobs()) { + // The submitter would be overloaded by building all its jobs locally, + // so penalize it heavily in order to send jobs preferably to other nodes, + // so that the submitter should preferably do tasks that cannot be distributed, + // such as linking or preparing jobs for remote nodes. + f *= 0.1; +#if DEBUG_SCHEDULER > 2 + if(!blockDebug) + log_info() << "penalizing local build for job " << job->id() << endl; +#endif + } else if (clientCount == cs->maxJobs()) { + // This means the submitter would be fully loaded by its jobs. It is still + // preferable to distribute the job, unless the submitter is noticeably faster. + f *= 0.8; +#if DEBUG_SCHEDULER > 2 + if(!blockDebug) + log_info() << "slightly penalizing local build for job " << job->id() << endl; +#endif + } + else if (clientCount <= cs->maxJobs() / 2) { + // The submitter has only few jobs, slightly prefer building the job locally + // in order to save the overhead of distributing. + // Note that this is unreliable, the submitter may be in fact running a large + // parallel build but this is just the first of the jobs and other icecc instances + // haven't been launched yet. There's probably no good way to detect this reliably. f *= 1.1; +#if DEBUG_SCHEDULER > 2 + if(!blockDebug) + log_info() << "slightly preferring local build for job " << job->id() << endl; +#endif } else { - f *= 0.1; // penalize heavily + // the remaining case, don't adjust + f *= 1; } - } else { // ignoring load for submitter - assuming the load is our own + // ignoring load for submitter - assuming the load is our own + } else { f *= float(1000 - cs->load()) / 1000; } + + /* Gradually throttle with the number of assigned jobs. This + * takes care of the fact that not all slots are equally fast on + * CPUs with SMT and dynamic clock ramping. 
+ */ + f *= (1.0f - (0.5f * cs->jobList().size() / cs->maxJobs())); } // below we add a pessimism factor - assuming the first job a computer got is not representative @@ -306,16 +346,16 @@ static void handle_monitor_stats(CompileServer *cs, StatsMsg *m = 0) if (m) { sprintf(buffer, "Load:%d\n", m->load); msg += buffer; - sprintf(buffer, "LoadAvg1:%d\n", m->loadAvg1); + sprintf(buffer, "LoadAvg1:%u\n", m->loadAvg1); msg += buffer; - sprintf(buffer, "LoadAvg5:%d\n", m->loadAvg5); + sprintf(buffer, "LoadAvg5:%u\n", m->loadAvg5); msg += buffer; - sprintf(buffer, "LoadAvg10:%d\n", m->loadAvg10); + sprintf(buffer, "LoadAvg10:%u\n", m->loadAvg10); msg += buffer; - sprintf(buffer, "FreeMem:%d\n", m->freeMem); + sprintf(buffer, "FreeMem:%u\n", m->freeMem); msg += buffer; } else { - sprintf(buffer, "Load:%d\n", cs->load()); + sprintf(buffer, "Load:%u\n", cs->load()); msg += buffer; } @@ -385,6 +425,8 @@ static bool handle_cs_request(MsgChannel *cs, Msg *_m) CompileServer *submitter = static_cast(cs); + submitter->setClientCount(m->client_count); + Job *master_job = 0; for (unsigned int i = 0; i < m->count; ++i) { @@ -392,7 +434,26 @@ static bool handle_cs_request(MsgChannel *cs, Msg *_m) job->setEnvironments(m->versions); job->setTargetPlatform(m->target); job->setArgFlags(m->arg_flags); - job->setLanguage((m->lang == CompileJob::Lang_C) ? "C" : "C++"); + switch(m->lang) { + case CompileJob::Lang_C: + job->setLanguage("C"); + break; + case CompileJob::Lang_CXX: + job->setLanguage("C++"); + break; + case CompileJob::Lang_OBJC: + job->setLanguage("ObjC"); + break; + case CompileJob::Lang_OBJCXX: + job->setLanguage("ObjC++"); + break; + case CompileJob::Lang_Custom: + job->setLanguage(""); + break; + default: + job->setLanguage("???"); // presumably newer client? + break; + } job->setFileName(m->filename); job->setLocalClientId(m->client_id); job->setPreferredHost(m->preferred_host); @@ -557,18 +618,6 @@ static CompileServer *pick_server(Job *job) return 0; } - /* Now guess about the job. First see, if this submitter already - had other jobs. Use them as base. */ - JobStat guess; - - if (job->submitter()->lastRequestedJobs().size() > 0) { - guess = job->submitter()->cumRequested() - / job->submitter()->lastRequestedJobs().size(); - } else { - /* Otherwise simply average over all jobs. */ - guess = cum_job_stats / all_job_stats.size(); - } - CompileServer *best = 0; // best uninstalled CompileServer *bestui = 0; @@ -619,8 +668,9 @@ static CompileServer *pick_server(Job *job) #if DEBUG_SCHEDULER > 1 trace() << cs->nodeName() << " compiled " << cs->lastCompiledJobs().size() << " got now: " << - cs->jobList().size() << " speed: " << server_speed(cs, job) << " compile time " << - cs->cumCompiled().compileTimeUser() << " produced code " << cs->cumCompiled().outputSize() << endl; + cs->jobList().size() << " speed: " << server_speed(cs, job, true) << " compile time " << + cs->cumCompiled().compileTimeUser() << " produced code " << cs->cumCompiled().outputSize() << + " client count: " << cs->clientCount() << endl; #endif if ((cs->lastCompiledJobs().size() == 0) && (cs->jobList().size() == 0) && cs->maxJobs()) { @@ -638,6 +688,15 @@ static CompileServer *pick_server(Job *job) break; } + /* Distribute 5% of our jobs to servers which haven't been picked in a + long time. This gives us a chance to adjust the server speed rating, + which may change due to external influences out of our control. 
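// Pulled out of the scheduler, the weighting applied above amounts to two factors when
// a new job is being placed: a submitter-locality factor driven by how many local
// clients the submitting daemon already serves, and a throttle that discounts a server
// in proportion to how many of its slots are already filled (SMT siblings and clock
// ramping make the later slots slower). A sketch with the same constants, detached
// from the CompileServer class; adjustSpeed is not icecream code:
#include <cstdio>

static float adjustSpeed(float rawSpeed, bool isSubmitter, int clientCount,
                         int maxJobs, int assignedJobs, int load /* 0..1000 */)
{
    float f = rawSpeed;
    if (isSubmitter) {
        if (clientCount > maxJobs)
            f *= 0.1f;   // submitter would be overloaded: strongly prefer remote nodes
        else if (clientCount == maxJobs)
            f *= 0.8f;   // fully loaded: still slightly prefer distributing
        else if (clientCount <= maxJobs / 2)
            f *= 1.1f;   // mostly idle: save the overhead of distributing
        // in between: leave f unchanged
    } else {
        f *= float(1000 - load) / 1000;  // remote node: discount by reported load
    }
    // Gradually throttle with the number of already assigned jobs.
    f *= 1.0f - 0.5f * assignedJobs / maxJobs;
    return f;
}

int main()
{
    // A submitter with 16 local clients but only 8 slots gets heavily penalized.
    std::printf("%.2f\n", adjustSpeed(100.0f, true, 16, 8, 2, 0));
}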
*/ + if (!cs->lastPickedId() || + ((job->id() - cs->lastPickedId()) > (20 * css.size()))) { + best = cs; + break; + } + if (!envs_match(cs, job).empty()) { if (!best) { best = cs; @@ -671,29 +730,23 @@ static CompileServer *pick_server(Job *job) } } - // to make sure we find the fast computers at least after some time, we overwrite - // the install rule for every 19th job - if the farm is only filled a bit - if (bestui && ((matches < 11) && (matches < (css.size() / 3))) && ((job->id() % 19) != 0)) { - best = 0; - } - if (best) { #if DEBUG_SCHEDULER > 1 - trace() << "taking best installed " << best->nodeName() << " " << server_speed(best, job) << endl; + trace() << "taking best installed " << best->nodeName() << " " << server_speed(best, job, true) << endl; #endif return best; } if (bestui) { #if DEBUG_SCHEDULER > 1 - trace() << "taking best uninstalled " << bestui->nodeName() << " " << server_speed(bestui, job) << endl; + trace() << "taking best uninstalled " << bestui->nodeName() << " " << server_speed(bestui, job, true) << endl; #endif return bestui; } if (bestpre) { #if DEBUG_SCHEDULER > 1 - trace() << "taking best preload " << bestui->nodeName() << " " << server_speed(bestui, job) << endl; + trace() << "taking best preload " << bestui->nodeName() << " " << server_speed(bestui, job, true) << endl; #endif } @@ -1070,6 +1123,8 @@ static bool handle_job_begin(CompileServer *cs, Msg *_m) return false; } + cs->setClientCount(m->client_count); + job->setState(Job::COMPILING); job->setStartTime(m->stime); job->setStartOnScheduler(time(0)); @@ -1095,21 +1150,21 @@ static bool handle_job_done(CompileServer *cs, Msg *_m) Job *j = 0; - if (m->exitcode == CLIENT_WAS_WAITING_FOR_CS) { - // the daemon saw a cancel of what he believes is waiting in the scheduler + if (uint32_t clientId = m->unknown_job_client_id()) { + // The daemon has sent a done message for a job for which it doesn't know the job id (happens + // if the job is cancelled before we send back the job id). Find the job using the client id. map::iterator mit; for (mit = jobs.begin(); mit != jobs.end(); ++mit) { Job *job = mit->second; trace() << "looking for waitcs " << job->server() << " " << job->submitter() << " " << cs - << " " << job->state() << " " << job->localClientId() << " " << m->job_id + << " " << job->state() << " " << job->localClientId() << " " << clientId << endl; - if (job->server() == 0 && job->submitter() == cs && job->state() == Job::PENDING - && job->localClientId() == m->job_id) { + if (job->server() == 0 && job->submitter() == cs && job->localClientId() == clientId) { trace() << "STOP (WAITFORCS) FOR " << mit->first << endl; j = job; - m->job_id = j->id(); // that's faked + m->set_job_id( j->id()); // Now we know the job's id. /* Unfortunately the toanswer queues are also tagged based on the daemon, so we need to clean them up also. */ @@ -1143,11 +1198,6 @@ static bool handle_job_done(CompileServer *cs, Msg *_m) return false; } - if (j->state() == Job::PENDING) { - trace() << "job ID still pending ?! scheduler recently restarted? 
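// The lastPickedId() bookkeeping introduced above lets pick_server() force a job onto
// a node that has not been chosen for a long time, so its speed rating gets refreshed.
// Since job ids are handed out sequentially, "a long time" is a gap of more than
// 20 * <number of servers> job ids. A sketch of just that predicate, with simplified
// names that are not the scheduler's own:
#include <cstdio>

static bool pickToRefreshRating(unsigned int currentJobId,
                                unsigned int lastPickedJobId,
                                unsigned int serverCount)
{
    if (lastPickedJobId == 0)
        return true;  // never picked so far: we know nothing about this node yet
    return currentJobId - lastPickedJobId > 20 * serverCount;
}

int main()
{
    // With 10 servers, a node last used at job 100 is forced again by job 301.
    std::printf("%d\n", pickToRefreshRating(301, 100, 10)); // prints 1
}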
" << m->job_id << endl; - return false; - } - if (m->is_from_server() && (j->server() != cs)) { log_info() << "the server isn't the same for job " << m->job_id << endl; log_info() << "server: " << j->server()->nodeName() << endl; @@ -1166,6 +1216,8 @@ static bool handle_job_done(CompileServer *cs, Msg *_m) return false; } + cs->setClientCount(m->client_count); + if (m->exitcode == 0) { std::ostream &dbg = trace(); dbg << "END " << m->job_id @@ -1240,6 +1292,7 @@ static bool handle_stats(CompileServer *cs, Msg *_m) for (list::iterator it = css.begin(); it != css.end(); ++it) if (*it == cs) { (*it)->setLoad(m->load); + (*it)->setClientCount(m->client_count); handle_monitor_stats(*it, m); return true; } @@ -1284,7 +1337,7 @@ static string dump_job(Job *job) default: jobState = "Huh?"; } - snprintf(buffer, sizeof(buffer), "%d %s sub:%s on:%s ", + snprintf(buffer, sizeof(buffer), "%u %s sub:%s on:%s ", job->id(), jobState.c_str(), job->submitter() ? job->submitter()->nodeName().c_str() : "<>", @@ -1347,7 +1400,6 @@ static bool handle_line(CompileServer *cs, Msg *_m) return false; } - char buffer[1000]; string line; list l; split_string(m->text, " \t\n", l); @@ -1365,6 +1417,7 @@ static bool handle_line(CompileServer *cs, Msg *_m) if (cmd == "listcs") { for (list::iterator it = css.begin(); it != css.end(); ++it) { + char buffer[1000]; sprintf(buffer, " (%s:%d) ", (*it)->name.c_str(), (*it)->remotePort()); line = " " + (*it)->nodeName() + buffer; line += "[" + (*it)->hostPlatform() + "] speed="; @@ -1622,7 +1675,7 @@ static bool handle_activity(CompileServer *cs) { Msg *m; bool ret = true; - m = cs->get_msg(0); + m = cs->get_msg(0, true); if (!m) { handle_end(cs, m); @@ -1751,48 +1804,6 @@ static int open_tcp_listener(short port) return fd; } -#define BROAD_BUFLEN 268 -#define BROAD_BUFLEN_OLD 16 -static int prepare_broadcast_reply(char* buf, const char* netname) -{ - if (buf[0] < 33) { // old client - buf[0]++; - memset(buf + 1, 0, BROAD_BUFLEN_OLD - 1); - snprintf(buf + 1, BROAD_BUFLEN_OLD - 1, "%s", netname); - buf[BROAD_BUFLEN_OLD - 1] = 0; - return BROAD_BUFLEN_OLD; - } else { // net client - buf[0] += 2; - memset(buf + 1, 0, BROAD_BUFLEN - 1); - uint32_t tmp_version = PROTOCOL_VERSION; - uint64_t tmp_time = starttime; - memcpy(buf + 1, &tmp_version, sizeof(uint32_t)); - memcpy(buf + 1 + sizeof(uint32_t), &tmp_time, sizeof(uint64_t)); - const int OFFSET = 1 + sizeof(uint32_t) + sizeof(uint64_t); - snprintf(buf + OFFSET, BROAD_BUFLEN - OFFSET, "%s", netname); - buf[BROAD_BUFLEN - 1] = 0; - return BROAD_BUFLEN; - } -} - -static void broadcast_scheduler_version(const char* netname) -{ - const char length_netname = strlen(netname); - const int schedbuflen = 5 + sizeof(uint64_t) + length_netname; - char *buf = new char[ schedbuflen ]; - buf[0] = 'I'; - buf[1] = 'C'; - buf[2] = 'E'; - buf[3] = PROTOCOL_VERSION; - uint64_t tmp_time = starttime; - memcpy(buf + 4, &tmp_time, sizeof(uint64_t)); - buf[4 + sizeof(uint64_t)] = length_netname; - strncpy(buf + 5 + sizeof(uint64_t), netname, length_netname); - DiscoverSched::broadcastData(scheduler_port, buf, schedbuflen); - delete[] buf; - buf = 0; -} - static void usage(const char *reason = 0) { if (reason) { @@ -1822,7 +1833,7 @@ static void trigger_exit(int signum) } else { // hmm, we got killed already. 
try better static const char msg[] = "forced exit.\n"; - write(STDERR_FILENO, msg, strlen( msg )); + ignore_result(write(STDERR_FILENO, msg, strlen( msg ))); _exit(1); } @@ -1834,26 +1845,26 @@ static void handle_scheduler_announce(const char* buf, const char* netname, bool { /* Another scheduler is announcing it's running, disconnect daemons if it has a better version or the same version but was started earlier. */ - if (!persistent_clients){ - uint64_t tmp_time; - memcpy(&tmp_time, buf + 4, sizeof(uint64_t)); - time_t other_time = tmp_time; - const unsigned char other_scheduler_protocol = buf[3]; - if (other_scheduler_protocol >= 36) + time_t other_time; + int other_protocol_version; + string other_netname; + Broadcasts::getSchedulerVersionData(buf, &other_protocol_version, &other_time, &other_netname); + trace() << "Received scheduler announcement from " << inet_ntoa(broad_addr.sin_addr) + << ":" << ntohs(broad_addr.sin_port) + << " (version " << int(other_protocol_version) << ", netname " << other_netname << ")" << endl; + if (other_protocol_version >= 36) + { + if (other_netname == netname) { - const unsigned char recv_netname_len = buf[4 + sizeof(uint64_t)]; - string local_netname = netname; - string recv_netname = string(buf + 5 + sizeof(uint64_t), recv_netname_len); - if (recv_netname == local_netname) + if (other_protocol_version > PROTOCOL_VERSION || (other_protocol_version == PROTOCOL_VERSION && other_time < starttime)) { - if (other_scheduler_protocol > PROTOCOL_VERSION || (other_scheduler_protocol == PROTOCOL_VERSION && other_time < starttime)) - { + if (!persistent_clients){ + log_info() << "Scheduler from " << inet_ntoa(broad_addr.sin_addr) + << ":" << ntohs(broad_addr.sin_port) + << " (version " << int(other_protocol_version) << ") has announced itself as a preferred" + " scheduler, disconnecting all connections." << endl; if (!css.empty() || !monitors.empty()) { - log_info() << "Scheduler from " << inet_ntoa(broad_addr.sin_addr) - << ":" << ntohs(broad_addr.sin_port) - << " (version " << int(other_scheduler_protocol) << ") has announced itself as a preferred" - " scheduler, disconnecting all connections." << endl; while (!css.empty()) { handle_end(css.front(), NULL); @@ -1874,7 +1885,7 @@ int main(int argc, char *argv[]) int listen_fd, remote_fd, broad_fd, text_fd; struct sockaddr_in remote_addr; socklen_t remote_len; - char *netname = (char *)"ICECREAM"; + const char *netname = "ICECREAM"; bool detach = false; bool persistent_clients = false; int debug_level = Error; @@ -1912,7 +1923,7 @@ int main(int argc, char *argv[]) { 0, 0, 0, 0 } }; - const int c = getopt_long(argc, argv, "n:p:hl:vdr:u:r:", long_options, &option_index); + const int c = getopt_long(argc, argv, "n:p:hl:vdru:", long_options, &option_index); if (c == -1) { break; // eoo @@ -1938,14 +1949,8 @@ int main(int argc, char *argv[]) break; case 'v': - if (debug_level & Warning) { - if (debug_level & Info) { // for second call - debug_level |= Debug; - } else { - debug_level |= Info; - } - } else { - debug_level |= Warning; + if (debug_level < MaxVerboseLevel) { + debug_level++; } break; @@ -1999,7 +2004,7 @@ int main(int argc, char *argv[]) } if (warn_icecc_user_errno != 0) { - log_errno("Error: no icecc user on system. Falling back to nobody.", errno); + log_errno("No icecc user on system. 
Falling back to nobody.", errno); } if (getuid() == 0) { @@ -2040,7 +2045,10 @@ int main(int argc, char *argv[]) log_info() << "ICECREAM scheduler " VERSION " starting up, port " << scheduler_port << endl; if (detach) { - daemon(0, 0); + if (daemon(0, 0) != 0) { + log_errno("Failed to detach.", errno); + exit(1); + } } listen_fd = open_tcp_listener(scheduler_port); @@ -2067,6 +2075,8 @@ int main(int argc, char *argv[]) } starttime = time(0); + if( getenv( "ICECC_FAKE_STARTTIME" ) != NULL ) + starttime -= 1000; ofstream pidFile; string progName = argv[0]; @@ -2080,9 +2090,11 @@ int main(int argc, char *argv[]) signal(SIGINT, trigger_exit); signal(SIGALRM, trigger_exit); + log_info() << "scheduler ready" << endl; + time_t next_listen = 0; - broadcast_scheduler_version(netname); + Broadcasts::broadcastSchedulerVersion(scheduler_port, netname, starttime); last_announce = starttime; while (!exit_main_loop) { @@ -2098,7 +2110,7 @@ int main(int argc, char *argv[]) their daemons if we are the preferred scheduler (daemons with version new enough should automatically select the best scheduler, but old daemons connect randomly). */ if (last_announce + 120 < time(NULL)) { - broadcast_scheduler_version(netname); + Broadcasts::broadcastSchedulerVersion(scheduler_port, netname, starttime); last_announce = time(NULL); } @@ -2163,19 +2175,21 @@ int main(int argc, char *argv[]) } } - max_fd = select(max_fd + 1, &read_set, &write_set, NULL, &tv); + int active_fds = select(max_fd + 1, &read_set, &write_set, NULL, &tv); - if (max_fd < 0 && errno == EINTR) { + if (active_fds < 0 && errno == EINTR) { + reset_debug_if_needed(); // we possibly got SIGHUP continue; } + reset_debug_if_needed(); - if (max_fd < 0) { + if (active_fds < 0) { log_perror("select()"); return 1; } if (FD_ISSET(listen_fd, &read_set)) { - max_fd--; + active_fds--; bool pending_connections = true; while (pending_connections) { @@ -2218,8 +2232,8 @@ int main(int argc, char *argv[]) next_listen = time(0) + 1; } - if (max_fd && FD_ISSET(text_fd, &read_set)) { - max_fd--; + if (active_fds && FD_ISSET(text_fd, &read_set)) { + active_fds--; remote_len = sizeof(remote_addr); remote_fd = accept(text_fd, (struct sockaddr *) &remote_addr, @@ -2247,20 +2261,17 @@ int main(int argc, char *argv[]) } } - if (max_fd && FD_ISSET(broad_fd, &read_set)) { - max_fd--; - char buf[BROAD_BUFLEN + 1]; + if (active_fds && FD_ISSET(broad_fd, &read_set)) { + active_fds--; + char buf[Broadcasts::BROAD_BUFLEN + 1]; struct sockaddr_in broad_addr; socklen_t broad_len = sizeof(broad_addr); /* We can get either a daemon request for a scheduler (1 byte) or another scheduler announcing itself (4 bytes + time). */ - int schedbuflen = 4 + sizeof(uint64_t); - - int buflen = recvfrom(broad_fd, buf, BROAD_BUFLEN, 0, (struct sockaddr *) &broad_addr, + int buflen = recvfrom(broad_fd, buf, Broadcasts::BROAD_BUFLEN, 0, (struct sockaddr *) &broad_addr, &broad_len); - /* Daemon is searching for a scheduler, only answer if daemon would be able to talk to us. */ - if (buflen < 0 || buflen > BROAD_BUFLEN){ + if (buflen < 0 || buflen > Broadcasts::BROAD_BUFLEN){ int err = errno; log_perror("recvfrom()"); @@ -2273,42 +2284,27 @@ int main(int argc, char *argv[]) return -1; } } - if (buflen == 1) { - if (buf[0] >= MIN_PROTOCOL_VERSION){ + int daemon_version; + if (DiscoverSched::isSchedulerDiscovery(buf, buflen, &daemon_version)) { + /* Daemon is searching for a scheduler, only answer if daemon would be able to talk to us. 
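// The select() fix above stops reusing max_fd for two different purposes: the highest
// descriptor passed to select() and the count of ready descriptors it returns are now
// tracked separately, and the ready count is decremented as each descriptor is
// handled so later checks can be skipped. A stripped-down sketch of that pattern,
// watching only stdin and purely illustrative:
#include <sys/select.h>
#include <unistd.h>
#include <cerrno>
#include <cstdio>

int main()
{
    fd_set read_set;
    FD_ZERO(&read_set);
    FD_SET(STDIN_FILENO, &read_set);
    struct timeval tv = { 1, 0 };          // wait at most one second

    int max_fd = STDIN_FILENO;             // highest fd number, stays what it is
    int active_fds = select(max_fd + 1, &read_set, NULL, NULL, &tv);
    if (active_fds < 0) {
        if (errno == EINTR)
            return 0;                      // interrupted, just retry next round
        perror("select()");
        return 1;
    }
    if (active_fds > 0 && FD_ISSET(STDIN_FILENO, &read_set)) {
        active_fds--;                      // one ready descriptor handled
        std::printf("stdin is readable\n");
    }
    return 0;
}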
*/ + if ( daemon_version >= MIN_PROTOCOL_VERSION){ log_info() << "broadcast from " << inet_ntoa(broad_addr.sin_addr) << ":" << ntohs(broad_addr.sin_port) - << " (version " << int(buf[0]) << ")\n"; - int reply_len = prepare_broadcast_reply(buf, netname); + << " (version " << daemon_version << ")\n"; + int reply_len = DiscoverSched::prepareBroadcastReply(buf, netname, starttime); if (sendto(broad_fd, buf, reply_len, 0, (struct sockaddr *) &broad_addr, broad_len) != reply_len) { log_perror("sendto()"); } } } - else if(buflen >= schedbuflen && buf[0] == 'I' && buf[1] == 'C' && buf[2] == 'E') { - if(buf[3] > 35) - { - schedbuflen += 1 + buf[schedbuflen]; - } - if (buflen != schedbuflen){ - int err = errno; - log_perror("recvfrom()"); - - /* Some linux 2.6 kernels can return from select with - data available, and then return from read() with EAGAIN - even on a blocking socket (breaking POSIX). Happens - when the arriving packet has a wrong checksum. So - we ignore EAGAIN here, but still abort for all other errors. */ - if (err != EAGAIN) { - return -1; - } - } + else if(Broadcasts::isSchedulerVersion(buf, buflen)) { handle_scheduler_announce(buf, netname, persistent_clients, broad_addr); } } for (map::const_iterator it = fd2cs.begin(); - max_fd && it != fd2cs.end();) { + active_fds > 0 && it != fd2cs.end();) { int i = it->first; CompileServer *cs = it->second; /* handle_activity can delete the channel from the fd2cs list, @@ -2323,20 +2319,23 @@ int main(int argc, char *argv[]) } } - max_fd--; + active_fds--; } } for (list::const_iterator it = cs_in_tsts.begin(); it != cs_in_tsts.end(); ++it) { + if(find(css.begin(), css.end(), *it) == css.end()) { + continue; // deleted meanwhile + } if((*it)->getConnectionInProgress()) { - if(max_fd && (FD_ISSET((*it)->getInFd(), &read_set) || FD_ISSET((*it)->getInFd(), &write_set)) && (*it)->isConnected()) + if(active_fds > 0 && (FD_ISSET((*it)->getInFd(), &read_set) || FD_ISSET((*it)->getInFd(), &write_set)) && (*it)->isConnected()) { - max_fd--; + active_fds--; (*it)->updateInConnectivity(true); } - else if((!max_fd || (FD_ISSET((*it)->getInFd(), &read_set) || FD_ISSET((*it)->getInFd(), &write_set))) && !(*it)->isConnected()) + else if((active_fds == 0 || (FD_ISSET((*it)->getInFd(), &read_set) || FD_ISSET((*it)->getInFd(), &write_set))) && !(*it)->isConnected()) { (*it)->updateInConnectivity(false); } @@ -2352,7 +2351,7 @@ int main(int argc, char *argv[]) if ((-1 == close(broad_fd)) && (errno != EBADF)){ log_perror("close failed"); } - if (-1 == unlink(pidFilePath.c_str())){ + if (-1 == unlink(pidFilePath.c_str()) && errno != ENOENT){ log_perror("unlink failed") << "\t" << pidFilePath << endl; } return 0; diff --git a/services/Makefile.am b/services/Makefile.am index 041279b3b..31ccd6d76 100644 --- a/services/Makefile.am +++ b/services/Makefile.am @@ -19,7 +19,8 @@ noinst_HEADERS = \ getifaddrs.h \ logging.h \ tempfile.h \ - platform.h + platform.h \ + util.h pkgconfigdir = $(libdir)/pkgconfig pkgconfig_DATA = icecc.pc diff --git a/services/comm.cpp b/services/comm.cpp index 78d1fedf0..e93cc7c36 100644 --- a/services/comm.cpp +++ b/services/comm.cpp @@ -50,6 +50,9 @@ #ifdef HAVE_LIBCAP_NG #include #endif +#include "getifaddrs.h" +#include +#include #include "logging.h" #include "job.h" @@ -66,6 +69,14 @@ using namespace std; #define MAX_MSG_SIZE 1 * 1024 * 1024 +/* + * On a slow and congested network it's possible for a send call to get starved. + * This will happen especially when trying to send a huge number of bytes over at + * once. 
We can avoid this situation to a large extend by sending smaller + * chunks of data over. + */ +#define MAX_WRITE_SIZE 10 * 1024 + /* TODO * buffered in/output per MsgChannel + move read* into MsgChannel, create buffer-fill function @@ -87,6 +98,7 @@ bool MsgChannel::read_a_bit() if (count < 128) { inbuflen = (inbuflen + 128 + 127) & ~(size_t) 127; inbuf = (char *) realloc(inbuf, inbuflen); + assert(inbuf); // Probably unrecoverable if realloc fails anyway. count = inbuflen - inofs; } @@ -123,7 +135,14 @@ bool MsgChannel::read_a_bit() error = true; } - return !error; + if (error) { + // Daemons sometimes successfully do accept() but then the connection + // gets ECONNRESET. Probably a spurious result from accept(), so + // just be silent about it in this case. + set_error( instate == NEED_PROTO ); + return false; + } + return true; } bool MsgChannel::update_state(void) @@ -152,6 +171,7 @@ bool MsgChannel::update_state(void) if (remote_prot < MIN_PROTOCOL_VERSION || remote_prot > (1 << 20)) { remote_prot = 0; + set_error(); return false; } @@ -166,6 +186,7 @@ bool MsgChannel::update_state(void) writefull(vers, 4); if (!flush_writebuf(true)) { + set_error(); return false; } @@ -176,6 +197,7 @@ bool MsgChannel::update_state(void) if ((int)remote_prot != protocol) { protocol = 0; + set_error(); return false; } @@ -184,6 +206,8 @@ bool MsgChannel::update_state(void) break; } else { trace() << "NEED_PROTO but protocol > 0" << endl; + set_error(); + return false; } } @@ -192,7 +216,7 @@ bool MsgChannel::update_state(void) if (instate != NEED_LEN) { break; } - + // fallthrough case NEED_LEN: if (text_based) { @@ -214,12 +238,15 @@ bool MsgChannel::update_state(void) (*this) >> inmsglen; if (inmsglen > MAX_MSG_SIZE) { + log_error() << "received a too large message (size " << inmsglen << "), ignoring" << endl; + set_error(); return false; } if (inbuflen - intogo < inmsglen) { inbuflen = (inmsglen + intogo + 127) & ~(size_t)127; inbuf = (char *) realloc(inbuf, inbuflen); + assert(inbuf); // Probably unrecoverable if realloc fails anyway. } instate = FILL_BUF; @@ -227,7 +254,7 @@ bool MsgChannel::update_state(void) } else { break; } - + /* FALLTHROUGH */ case FILL_BUF: if (inofs - intogo >= inmsglen) { @@ -241,6 +268,9 @@ bool MsgChannel::update_state(void) case HAS_MSG: /* handled elsewere */ break; + + case ERROR: + return false; } return true; @@ -277,6 +307,7 @@ void MsgChannel::writefull(const void *_buf, size_t count) /* Realloc to a multiple of 128. */ msgbuflen = (msgtogo + count + 127) & ~(size_t)127; msgbuf = (char *) realloc(msgbuf, msgbuflen); + assert(msgbuf); // Probably unrecoverable if realloc fails anyway. } memcpy(msgbuf + msgtogo, _buf, count); @@ -290,12 +321,12 @@ bool MsgChannel::flush_writebuf(bool blocking) while (msgtogo) { #ifdef MSG_NOSIGNAL - ssize_t ret = send(fd, buf, msgtogo, MSG_NOSIGNAL); + ssize_t ret = send(fd, buf, msgtogo < MAX_WRITE_SIZE ? msgtogo : MAX_WRITE_SIZE, MSG_NOSIGNAL); #else void (*oldsigpipe)(int); oldsigpipe = signal(SIGPIPE, SIG_IGN); - ssize_t ret = send(fd, buf, msgtogo, 0); + ssize_t ret = send(fd, buf, msgtogo < MAX_WRITE_SIZE ? msgtogo : MAX_WRITE_SIZE, 0); signal(SIGPIPE, oldsigpipe); #endif @@ -306,7 +337,7 @@ bool MsgChannel::flush_writebuf(bool blocking) /* If we want to write blocking, but couldn't write anything, select on the fd. 
*/ - if (blocking && errno == EAGAIN) { + if (blocking && ( errno == EAGAIN || errno == ENOTCONN )) { int ready; for (;;) { @@ -348,7 +379,11 @@ bool MsgChannel::flush_writebuf(bool blocking) msgofs = buf - msgbuf; chop_output(); - return !error; + if(error) { + set_error(); + return false; + } + return true; } MsgChannel &MsgChannel::operator>>(uint32_t &buf) @@ -482,6 +517,7 @@ void MsgChannel::readcompressed(unsigned char **uncompressed_buf, size_t &_uclen uncompressed_len = 0; _uclen = uncompressed_len; _clen = compressed_len; + set_error(); return; } @@ -528,6 +564,7 @@ void MsgChannel::writecompressed(const unsigned char *in_buf, size_t _in_len, si /* Realloc to a multiple of 128. */ msgbuflen = (msgtogo + out_len + 127) & ~(size_t)127; msgbuf = (char *) realloc(msgbuf, msgbuflen); + assert(msgbuf); // Probably unrecoverable if realloc fails anyway. } lzo_byte *out_buf = (lzo_byte *)(msgbuf + msgtogo); @@ -542,6 +579,9 @@ void MsgChannel::writecompressed(const unsigned char *in_buf, size_t _in_len, si } uint32_t _olen = htonl(out_len); + if(out_len > MAX_MSG_SIZE) { + log_error() << "internal error - size of compressed message to write exceeds max size:" << out_len << endl; + } memcpy(msgbuf + msgtogo_old, &_olen, 4); msgtogo += out_len; _out_len = out_len; @@ -573,6 +613,18 @@ void MsgChannel::write_line(const string &line) } } +void MsgChannel::set_error(bool silent) +{ + if( instate == ERROR ) { + return; + } + if( !silent ) { + trace() << "setting error state for channel " << dump() << endl; + } + instate = ERROR; + eof = true; +} + static int prepare_connect(const string &hostname, unsigned short p, struct sockaddr_in &remote_addr) { @@ -692,10 +744,11 @@ MsgChannel *Service::createChannel(const string &hostname, unsigned short p, int setsockopt(remote_fd, SOL_SOCKET, SO_SNDBUF, &i, sizeof(i)); if (connect(remote_fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)) < 0) { + log_perror_trace("connect"); + trace() << "connect failed on " << hostname << endl; if (-1 == close(remote_fd) && (errno != EBADF)){ log_perror("close failed"); } - trace() << "connect failed on " << hostname << endl; return 0; } } @@ -722,10 +775,11 @@ MsgChannel *Service::createChannel(const string &socket_path) } if (connect(remote_fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)) < 0) { + log_perror_trace("connect"); + trace() << "connect failed on " << socket_path << endl; if ((-1 == close(remote_fd)) && (errno != EBADF)){ log_perror("close failed"); } - trace() << "connect failed on " << socket_path << endl; return 0; } @@ -803,6 +857,7 @@ MsgChannel::MsgChannel(int _fd, struct sockaddr *_a, socklen_t _l, bool text) int on = 1; if (!setsockopt(_fd, SOL_SOCKET, SO_KEEPALIVE, (char *) &on, sizeof(on))) { +#if defined( TCP_KEEPIDLE ) || defined( TCPCTL_KEEPIDLE ) #if defined( TCP_KEEPIDLE ) int keepidle = TCP_KEEPIDLE; #else @@ -812,7 +867,9 @@ MsgChannel::MsgChannel(int _fd, struct sockaddr *_a, socklen_t _l, bool text) int sec; sec = MAX_SCHEDULER_PING - 3 * MAX_SCHEDULER_PONG; setsockopt(_fd, IPPROTO_TCP, keepidle, (char *) &sec, sizeof(sec)); +#endif +#if defined( TCP_KEEPINTVL ) || defined( TCPCTL_KEEPINTVL ) #if defined( TCP_KEEPINTVL ) int keepintvl = TCP_KEEPINTVL; #else @@ -821,6 +878,7 @@ MsgChannel::MsgChannel(int _fd, struct sockaddr *_a, socklen_t _l, bool text) sec = MAX_SCHEDULER_PONG; setsockopt(_fd, IPPROTO_TCP, keepintvl, (char *) &sec, sizeof(sec)); +#endif #ifdef TCP_KEEPCNT sec = 3; @@ -848,6 +906,7 @@ MsgChannel::MsgChannel(int _fd, struct sockaddr *_a, socklen_t _l, 
bool text) if (!flush_writebuf(true)) { protocol = 0; // unusable + set_error(); } } @@ -887,7 +946,7 @@ string MsgChannel::dump() const bool MsgChannel::wait_for_protocol() { /* protocol is 0 if we couldn't send our initial protocol version. */ - if (protocol == 0) { + if (protocol == 0 || instate == ERROR) { return false; } @@ -905,12 +964,14 @@ bool MsgChannel::wait_for_protocol() } if (ret == 0) { - log_error() << "no response from local daemon within timeout." << endl; + log_error() << "no response within timeout" << endl; + set_error(); return false; /* timeout. Consider it a fatal error. */ } if (ret < 0) { log_perror("select in wait_for_protocol()"); + set_error(); return false; } @@ -944,17 +1005,22 @@ void MsgChannel::setBulkTransfer() message to arrive. Returns false if there was some error. */ bool MsgChannel::wait_for_msg(int timeout) { + if (instate == ERROR) { + return false; + } + if (has_msg()) { return true; } if (!read_a_bit()) { trace() << "!read_a_bit\n"; + set_error(); return false; } if (timeout <= 0) { - trace() << "timeout <= 0\n"; + // trace() << "timeout <= 0\n"; return has_msg(); } @@ -978,6 +1044,7 @@ bool MsgChannel::wait_for_msg(int timeout) if (!read_a_bit()) { trace() << "!read_a_bit 2\n"; + set_error(); return false; } } @@ -985,13 +1052,13 @@ bool MsgChannel::wait_for_msg(int timeout) return true; } -Msg *MsgChannel::get_msg(int timeout) +Msg *MsgChannel::get_msg(int timeout, bool eofAllowed) { Msg *m = 0; enum MsgType type; if (!wait_for_msg(timeout)) { - trace() << "!wait_for_msg()\n"; + // trace() << "!wait_for_msg()\n"; return 0; } @@ -999,15 +1066,21 @@ Msg *MsgChannel::get_msg(int timeout) then we won't see it anymore. Return that to the caller. Don't use has_msg() here, as it returns true for eof. */ if (at_eof()) { - trace() << "saw eof without complete msg! " << instate << endl; + if (!eofAllowed) { + trace() << "saw eof without complete msg! " << instate << endl; + set_error(); + } return 0; } if (!has_msg()) { trace() << "saw eof without msg! 
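// get_msg() above now cross-checks the framing: after a message has deserialized
// itself, the number of bytes consumed from the input buffer must equal the length
// field that preceded it, otherwise the channel is put into the ERROR state. A toy
// version of that invariant check; the Reader type is invented for the illustration
// and is not MsgChannel:
#include <cstdio>
#include <cstdint>
#include <cstddef>

struct Reader {
    const uint8_t *data;
    size_t pos;
    uint32_t readU32() {
        uint32_t v = 0;
        for (int i = 0; i < 4; ++i)
            v = (v << 8) | data[pos++];
        return v;
    }
};

// Reads one length-prefixed message that is expected to hold exactly two 32-bit
// fields, and verifies the declared length against what was actually consumed.
static bool readOneMessage(Reader &r)
{
    const uint32_t declaredLen = r.readU32();
    const size_t before = r.pos;
    (void)r.readU32();                       // field 1
    (void)r.readU32();                       // field 2
    const size_t consumed = r.pos - before;
    if (consumed != declaredLen) {
        std::fprintf(stderr, "framing error: declared %u, consumed %zu\n",
                     declaredLen, consumed);
        return false;
    }
    return true;
}

int main()
{
    // The length prefix claims 12 bytes, but the message type only reads 8: rejected.
    const uint8_t wire[] = { 0,0,0,12, 0,0,0,1, 0,0,0,2, 0,0,0,3 };
    Reader r = { wire, 0 };
    std::printf("%s\n", readOneMessage(r) ? "ok" : "dropped");
}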
" << eof << " " << instate << endl; + set_error(); return 0; } + size_t intogo_old = intogo; + if (text_based) { type = M_TEXT; } else { @@ -1018,6 +1091,7 @@ Msg *MsgChannel::get_msg(int timeout) switch (type) { case M_UNKNOWN: + set_error(); return 0; case M_PING: m = new PingMsg; @@ -1115,10 +1189,22 @@ Msg *MsgChannel::get_msg(int timeout) if (!m) { trace() << "no message type" << endl; + set_error(); return 0; } m->fill_from_channel(this); + + if (!text_based) { + if( intogo - intogo_old != inmsglen ) { + log_error() << "internal error - message not read correctly, message size " << inmsglen + << " read " << (intogo - intogo_old) << endl; + delete m; + set_error(); + return 0; + } + } + instate = NEED_LEN; update_state(); @@ -1127,6 +1213,9 @@ Msg *MsgChannel::get_msg(int timeout) bool MsgChannel::send_msg(const Msg &m, int flags) { + if (instate == ERROR) { + return false; + } if (instate == NEED_PROTO && !wait_for_protocol()) { return false; } @@ -1139,7 +1228,13 @@ bool MsgChannel::send_msg(const Msg &m, int flags) } else { *this << (uint32_t) 0; m.send_to_channel(this); - uint32_t len = htonl(msgtogo - msgtogo_old - 4); + uint32_t out_len = msgtogo - msgtogo_old - 4; + if(out_len > MAX_MSG_SIZE) { + log_error() << "internal error - size of message to write exceeds max size:" << out_len << endl; + set_error(); + return false; + } + uint32_t len = htonl(out_len); memcpy(msgbuf + msgtogo_old, &len, 4); } @@ -1150,9 +1245,97 @@ bool MsgChannel::send_msg(const Msg &m, int flags) return flush_writebuf((flags & SendBlocking)); } -#include "getifaddrs.h" -#include -#include +static int get_second_port_for_debug( int port ) +{ + // When running tests, we want to check also interactions between 2 schedulers, but + // when they are both local, they cannot bind to the same port. So make sure to + // send all broadcasts to both. + static bool checkedDebug = false; + static int debugPort1 = 0; + static int debugPort2 = 0; + if( !checkedDebug ) { + checkedDebug = true; + if( const char* env = getenv( "ICECC_TEST_SCHEDULER_PORTS" )) { + debugPort1 = atoi( env ); + const char* env2 = strchr( env, ':' ); + if( env2 != NULL ) + debugPort2 = atoi( env2 + 1 ); + } + } + int secondPort = 0; + if( port == debugPort1 ) + secondPort = debugPort2; + else if( port == debugPort2 ) + secondPort = debugPort1; + return secondPort ? secondPort : -1; +} + +void Broadcasts::broadcastSchedulerVersion(int scheduler_port, const char* netname, time_t starttime) +{ + // Code for older schedulers than version 38. Has endianness problems, the message size + // is not BROAD_BUFLEN and the netname is possibly not null-terminated. + const char length_netname = strlen(netname); + const int schedbuflen = 5 + sizeof(uint64_t) + length_netname; + char *buf = new char[ schedbuflen ]; + buf[0] = 'I'; + buf[1] = 'C'; + buf[2] = 'E'; + buf[3] = PROTOCOL_VERSION; + uint64_t tmp_time = starttime; + memcpy(buf + 4, &tmp_time, sizeof(uint64_t)); + buf[4 + sizeof(uint64_t)] = length_netname; + strncpy(buf + 5 + sizeof(uint64_t), netname, length_netname); + broadcastData(scheduler_port, buf, schedbuflen); + delete[] buf; + // Latest version. 
+ buf = new char[ BROAD_BUFLEN ]; + memset(buf, 0, BROAD_BUFLEN ); + buf[0] = 'I'; + buf[1] = 'C'; + buf[2] = 'F'; // one up + buf[3] = PROTOCOL_VERSION; + uint32_t tmp_time_low = starttime & 0xffffffffUL; + uint32_t tmp_time_high = uint64_t(starttime) >> 32; + tmp_time_low = htonl( tmp_time_low ); + tmp_time_high = htonl( tmp_time_high ); + memcpy(buf + 4, &tmp_time_high, sizeof(uint32_t)); + memcpy(buf + 4 + sizeof(uint32_t), &tmp_time_low, sizeof(uint32_t)); + const int OFFSET = 4 + 2 * sizeof(uint32_t); + snprintf(buf + OFFSET, BROAD_BUFLEN - OFFSET, "%s", netname); + buf[BROAD_BUFLEN - 1] = 0; + broadcastData(scheduler_port, buf, BROAD_BUFLEN); + delete[] buf; +} + +bool Broadcasts::isSchedulerVersion(const char* buf, int buflen) +{ + if( buflen != BROAD_BUFLEN ) + return false; + // Ignore versions older than 38, they are older than us anyway, so not interesting. + if( buf[0] == 'I' && buf[1] == 'C' && buf[2] == 'F') { + return true; + } + return false; +} + +void Broadcasts::getSchedulerVersionData( const char* buf, int* protocol, time_t* time, string* netname ) +{ + assert( isSchedulerVersion( buf, BROAD_BUFLEN )); + const unsigned char other_scheduler_protocol = buf[3]; + uint32_t tmp_time_low, tmp_time_high; + memcpy(&tmp_time_high, buf + 4, sizeof(uint32_t)); + memcpy(&tmp_time_low, buf + 4 + sizeof(uint32_t), sizeof(uint32_t)); + tmp_time_low = ntohl( tmp_time_low ); + tmp_time_high = ntohl( tmp_time_high ); + time_t other_time = ( uint64_t( tmp_time_high ) << 32 ) | tmp_time_low;; + string recv_netname = string(buf + 4 + 2 * sizeof(uint32_t)); + if( protocol != NULL ) + *protocol = other_scheduler_protocol; + if( time != NULL ) + *time = other_time; + if( netname != NULL ) + *netname = recv_netname; +} /* Returns a filedesc. or a negative value for errors. 
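// The new 'ICF' broadcast above avoids the endianness problem of the old format by
// splitting the 64-bit start time into two 32-bit words and passing each through
// htonl()/ntohl(). A self-contained sketch of that encode/decode pair; the function
// names are illustrative:
#include <arpa/inet.h>
#include <cstdint>
#include <cstring>
#include <cassert>

static void encodeTime64(char *buf, uint64_t t)
{
    uint32_t high = htonl(uint32_t(t >> 32));
    uint32_t low  = htonl(uint32_t(t & 0xffffffffUL));
    memcpy(buf, &high, sizeof(uint32_t));
    memcpy(buf + sizeof(uint32_t), &low, sizeof(uint32_t));
}

static uint64_t decodeTime64(const char *buf)
{
    uint32_t high, low;
    memcpy(&high, buf, sizeof(uint32_t));
    memcpy(&low, buf + sizeof(uint32_t), sizeof(uint32_t));
    return (uint64_t(ntohl(high)) << 32) | ntohl(low);
}

int main()
{
    char buf[8];
    encodeTime64(buf, 1514764800ULL);            // some epoch timestamp
    assert(decodeTime64(buf) == 1514764800ULL);  // round-trips on any endianness
}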
*/ static int open_send_broadcast(int port, const char* buf, int size) @@ -1201,14 +1384,22 @@ static int open_send_broadcast(int port, const char* buf, int size) continue; } - if (ntohl(((struct sockaddr_in *) addr->ifa_addr)->sin_addr.s_addr) == 0x7f000001) { - trace() << "ignoring localhost " << addr->ifa_name << endl; - continue; - } + static bool in_tests = getenv( "ICECC_TESTS" ) != NULL; + if (!in_tests) { + if (ntohl(((struct sockaddr_in *) addr->ifa_addr)->sin_addr.s_addr) == 0x7f000001) { + trace() << "ignoring localhost " << addr->ifa_name << endl; + continue; + } - if ((addr->ifa_flags & IFF_POINTOPOINT) || !(addr->ifa_flags & IFF_BROADCAST)) { - log_info() << "ignoring tunnels " << addr->ifa_name << endl; - continue; + if ((addr->ifa_flags & IFF_POINTOPOINT) || !(addr->ifa_flags & IFF_BROADCAST)) { + log_info() << "ignoring tunnels " << addr->ifa_name << endl; + continue; + } + } else { + if (ntohl(((struct sockaddr_in *) addr->ifa_addr)->sin_addr.s_addr) != 0x7f000001) { + trace() << "ignoring non-localhost " << addr->ifa_name << endl; + continue; + } } if (addr->ifa_broadaddr) { @@ -1232,74 +1423,22 @@ static int open_send_broadcast(int port, const char* buf, int size) return ask_fd; } -#define BROAD_BUFLEN 32 -#define BROAD_BUFLEN_OLD 16 - -static bool -get_broad_answer(int ask_fd, int timeout, char *buf2, struct sockaddr_in *remote_addr, - socklen_t *remote_len) +void Broadcasts::broadcastData(int port, const char* buf, int len) { - char buf = PROTOCOL_VERSION; - fd_set read_set; - FD_ZERO(&read_set); - FD_SET(ask_fd, &read_set); - struct timeval tv; - tv.tv_sec = timeout / 1000; - tv.tv_usec = 1000 * (timeout % 1000); - errno = 0; - - if (select(ask_fd + 1, &read_set, NULL, NULL, &tv) <= 0) { - /* Normally this is a timeout, i.e. no scheduler there. */ - if (errno) { - log_perror("waiting for scheduler"); + int fd = open_send_broadcast(port, buf, len); + if (fd >= 0) { + if ((-1 == close(fd)) && (errno != EBADF)){ + log_perror("close failed"); } - - return false; } - - *remote_len = sizeof(struct sockaddr_in); - - int len = recvfrom(ask_fd, buf2, BROAD_BUFLEN, 0, (struct sockaddr *) remote_addr, remote_len); - if (len != BROAD_BUFLEN && len != BROAD_BUFLEN_OLD) { - log_perror("get_broad_answer recvfrom()"); - return false; - } - - if ((len == BROAD_BUFLEN_OLD && buf2[0] != buf + 1) // PROTOCOL <= 32 scheduler - || (len == BROAD_BUFLEN && buf2[0] != buf + 2)) { // PROTOCOL >= 33 scheduler - log_error() << "wrong answer" << endl; - return false; - } - - buf2[len - 1] = 0; - return true; -} - -static void get_broad_data(const char* buf, const char** name, int* version, time_t* start_time) -{ - if (buf[0] == PROTOCOL_VERSION + 1) { - // Scheduler version 32 or older, didn't send us its version, assume it's 32. - if (name != NULL) - *name = buf + 1; - if (version != NULL) - *version = 32; - if (start_time != NULL) - *start_time = 0; // Unknown too. 
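// DiscoverSched now prefers the new ICECC_SCHEDULER variable and only falls back to
// the historical USE_SCHEDULER name, matching the rename in the icecream(7) man page
// hunk earlier in this patch. The lookup order in isolation; the helper name is made
// up for the sketch:
#include <cstdlib>
#include <cstdio>

static const char *schedulerFromEnvironment()
{
    const char *value = std::getenv("ICECC_SCHEDULER");  // preferred spelling
    if (value == NULL)
        value = std::getenv("USE_SCHEDULER");            // legacy fallback
    return value;                                        // NULL: use broadcast discovery
}

int main()
{
    const char *s = schedulerFromEnvironment();
    std::printf("scheduler override: %s\n", s ? s : "(none, will broadcast)");
}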
- } else if(buf[0] == PROTOCOL_VERSION + 2) { - if (version != NULL) { - uint32_t tmp_version; - memcpy(&tmp_version, buf + 1, sizeof(uint32_t)); - *version = tmp_version; - } - if (start_time != NULL) { - uint64_t tmp_time; - memcpy(&tmp_time, buf + 1 + sizeof(uint32_t), sizeof(uint64_t)); - *start_time = tmp_time; + int secondPort = get_second_port_for_debug( port ); + if( secondPort > 0 ) { + int fd2 = open_send_broadcast(secondPort, buf, len); + if (fd2 >= 0) { + if ((-1 == close(fd2)) && (errno != EBADF)){ + log_perror("close failed"); + } } - if (name != NULL) - *name = buf + 1 + sizeof(uint32_t) + sizeof(uint64_t); - } else { - abort(); } } @@ -1309,15 +1448,19 @@ DiscoverSched::DiscoverSched(const std::string &_netname, int _timeout, , schedname(_schedname) , timeout(_timeout) , ask_fd(-1) + , ask_second_fd(-1) , sport(port) , best_version(0) , best_start_time(0) + , best_port(0) , multiple(false) { time0 = time(0); if (schedname.empty()) { - const char *get = getenv("USE_SCHEDULER"); + const char *get = getenv("ICECC_SCHEDULER"); + if( get == NULL ) + get = getenv("USE_SCHEDULER"); if (get) { string scheduler = get; @@ -1342,8 +1485,7 @@ DiscoverSched::DiscoverSched(const std::string &_netname, int _timeout, netname = ""; // take whatever the machine is giving us attempt_scheduler_connect(); } else { - char buf = PROTOCOL_VERSION; - ask_fd = open_send_broadcast(sport, &buf, 1); + sendSchedulerDiscovery( PROTOCOL_VERSION ); } } @@ -1354,6 +1496,11 @@ DiscoverSched::~DiscoverSched() log_perror("close failed"); } } + if (ask_second_fd >= 0) { + if ((-1 == close(ask_second_fd)) && (errno != EBADF)){ + log_perror("close failed"); + } + } } bool DiscoverSched::timed_out() @@ -1363,7 +1510,6 @@ bool DiscoverSched::timed_out() void DiscoverSched::attempt_scheduler_connect() { - time0 = time(0) + MAX_SCHEDULER_PONG; log_info() << "scheduler is on " << schedname << ":" << sport << " (net " << netname << ")" << endl; @@ -1372,9 +1518,130 @@ void DiscoverSched::attempt_scheduler_connect() } } +void DiscoverSched::sendSchedulerDiscovery( int version ) +{ + assert( version < 128 ); + char buf = version; + ask_fd = open_send_broadcast(sport, &buf, 1); + int secondPort = get_second_port_for_debug( sport ); + if( secondPort > 0 ) + ask_second_fd = open_send_broadcast(secondPort, &buf, 1); +} + +bool DiscoverSched::isSchedulerDiscovery(const char* buf, int buflen, int* daemon_version) +{ + if( buflen != 1 ) + return false; + if( daemon_version != NULL ) { + *daemon_version = buf[ 0 ]; + } + return true; +} + +static const int BROAD_BUFLEN = 268; +static const int BROAD_BUFLEN_OLD_2 = 32; +static const int BROAD_BUFLEN_OLD_1 = 16; + +int DiscoverSched::prepareBroadcastReply(char* buf, const char* netname, time_t starttime) +{ + if (buf[0] < 33) { // old client + buf[0]++; + memset(buf + 1, 0, BROAD_BUFLEN_OLD_1 - 1); + snprintf(buf + 1, BROAD_BUFLEN_OLD_1 - 1, "%s", netname); + buf[BROAD_BUFLEN_OLD_1 - 1] = 0; + return BROAD_BUFLEN_OLD_1; + } else if (buf[0] < 36) { + // This is like 36, but 36 silently changed the size of BROAD_BUFLEN from 32 to 268. + // Since get_broad_answer() explicitly null-terminates the data, this wouldn't lead + // to those receivers reading a shorter string that would not be null-terminated, + // but still, this is what versions 33-35 actually worked with. 
+ buf[0] += 2; + memset(buf + 1, 0, BROAD_BUFLEN_OLD_2 - 1); + uint32_t tmp_version = PROTOCOL_VERSION; + uint64_t tmp_time = starttime; + memcpy(buf + 1, &tmp_version, sizeof(uint32_t)); + memcpy(buf + 1 + sizeof(uint32_t), &tmp_time, sizeof(uint64_t)); + const int OFFSET = 1 + sizeof(uint32_t) + sizeof(uint64_t); + snprintf(buf + OFFSET, BROAD_BUFLEN_OLD_2 - OFFSET, "%s", netname); + buf[BROAD_BUFLEN_OLD_2 - 1] = 0; + return BROAD_BUFLEN_OLD_2; + } else if (buf[0] < 38) { // exposes endianess because of not using htonl() + buf[0] += 2; + memset(buf + 1, 0, BROAD_BUFLEN - 1); + uint32_t tmp_version = PROTOCOL_VERSION; + uint64_t tmp_time = starttime; + memcpy(buf + 1, &tmp_version, sizeof(uint32_t)); + memcpy(buf + 1 + sizeof(uint32_t), &tmp_time, sizeof(uint64_t)); + const int OFFSET = 1 + sizeof(uint32_t) + sizeof(uint64_t); + snprintf(buf + OFFSET, BROAD_BUFLEN - OFFSET, "%s", netname); + buf[BROAD_BUFLEN - 1] = 0; + return BROAD_BUFLEN; + } else { // latest version + buf[0] += 3; + memset(buf + 1, 0, BROAD_BUFLEN - 1); + uint32_t tmp_version = PROTOCOL_VERSION; + uint32_t tmp_time_low = starttime & 0xffffffffUL; + uint32_t tmp_time_high = uint64_t(starttime) >> 32; + tmp_version = htonl( tmp_version ); + tmp_time_low = htonl( tmp_time_low ); + tmp_time_high = htonl( tmp_time_high ); + memcpy(buf + 1, &tmp_version, sizeof(uint32_t)); + memcpy(buf + 1 + sizeof(uint32_t), &tmp_time_high, sizeof(uint32_t)); + memcpy(buf + 1 + 2 * sizeof(uint32_t), &tmp_time_low, sizeof(uint32_t)); + const int OFFSET = 1 + 3 * sizeof(uint32_t); + snprintf(buf + OFFSET, BROAD_BUFLEN - OFFSET, "%s", netname); + buf[BROAD_BUFLEN - 1] = 0; + return BROAD_BUFLEN; + } +} + +void DiscoverSched::get_broad_data(const char* buf, const char** name, int* version, time_t* start_time) +{ + if (buf[0] == PROTOCOL_VERSION + 1) { + // Scheduler version 32 or older, didn't send us its version, assume it's 32. + if (name != NULL) + *name = buf + 1; + if (version != NULL) + *version = 32; + if (start_time != NULL) + *start_time = 0; // Unknown too. + } else if(buf[0] == PROTOCOL_VERSION + 2) { + if (version != NULL) { + uint32_t tmp_version; + memcpy(&tmp_version, buf + 1, sizeof(uint32_t)); + *version = tmp_version; + } + if (start_time != NULL) { + uint64_t tmp_time; + memcpy(&tmp_time, buf + 1 + sizeof(uint32_t), sizeof(uint64_t)); + *start_time = tmp_time; + } + if (name != NULL) + *name = buf + 1 + sizeof(uint32_t) + sizeof(uint64_t); + } else if(buf[0] == PROTOCOL_VERSION + 3) { + if (version != NULL) { + uint32_t tmp_version; + memcpy(&tmp_version, buf + 1, sizeof(uint32_t)); + *version = ntohl( tmp_version ); + } + if (start_time != NULL) { + uint32_t tmp_time_low, tmp_time_high; + memcpy(&tmp_time_high, buf + 1 + sizeof(uint32_t), sizeof(uint32_t)); + memcpy(&tmp_time_low, buf + 1 + 2 * sizeof(uint32_t), sizeof(uint32_t)); + tmp_time_low = ntohl( tmp_time_low ); + tmp_time_high = ntohl( tmp_time_high ); + *start_time = ( uint64_t( tmp_time_high ) << 32 ) | tmp_time_low;; + } + if (name != NULL) + *name = buf + 1 + 3 * sizeof(uint32_t); + } else { + abort(); + } +} + MsgChannel *DiscoverSched::try_get_scheduler() { - if (schedname.empty() || 0 != best_version) { + if (schedname.empty()) { socklen_t remote_len; char buf2[BROAD_BUFLEN]; /* Try to get the scheduler with the newest version, and if there @@ -1392,14 +1659,20 @@ MsgChannel *DiscoverSched::try_get_scheduler() */ /* Read/test all packages arrived until now. 
*/ - while (get_broad_answer(ask_fd, 0/*timeout*/, buf2, - (struct sockaddr_in *) &remote_addr, &remote_len)) { + while (get_broad_answer(ask_fd, 0/*timeout*/, buf2, (struct sockaddr_in *) &remote_addr, &remote_len) + || ( ask_second_fd != -1 && get_broad_answer(ask_second_fd, 0/*timeout*/, buf2, + (struct sockaddr_in *) &remote_addr, &remote_len))) { int version; time_t start_time; const char* name; get_broad_data(buf2, &name, &version, &start_time); if (strcasecmp(netname.c_str(), name) == 0) { - if (version < 33) { + if( version >= 128 || version < 1 ) { + log_warning() << "Ignoring bogus version " << version << " from scheduler found at " << inet_ntoa(remote_addr.sin_addr) + << ":" << ntohs(remote_addr.sin_port) << endl; + continue; + } + else if (version < 33) { log_info() << "Suitable scheduler found at " << inet_ntoa(remote_addr.sin_addr) << ":" << ntohs(remote_addr.sin_port) << " (unknown version)" << endl; } else { @@ -1409,11 +1682,15 @@ MsgChannel *DiscoverSched::try_get_scheduler() if (best_version != 0) multiple = true; if (best_version < version || (best_version == version && best_start_time > start_time)) { - schedname = inet_ntoa(remote_addr.sin_addr); - sport = ntohs(remote_addr.sin_port); + best_schedname = inet_ntoa(remote_addr.sin_addr); + best_port = ntohs(remote_addr.sin_port); best_version = version; best_start_time = start_time; } + } else { + log_info() << "Ignoring scheduler at " << inet_ntoa(remote_addr.sin_addr) + << ":" << ntohs(remote_addr.sin_port) << " because of a different netname (" + << name << ")" << endl; } } @@ -1421,6 +1698,8 @@ MsgChannel *DiscoverSched::try_get_scheduler() if (best_version == 0) { return 0; } + schedname = best_schedname; + sport = best_port; if (multiple) log_info() << "Selecting scheduler at " << schedname << ":" << sport << endl; @@ -1428,6 +1707,14 @@ MsgChannel *DiscoverSched::try_get_scheduler() log_perror("close failed"); } ask_fd = -1; + if( get_second_port_for_debug( sport ) > 0 ) { + if (-1 == close(ask_second_fd)){ + log_perror("close failed"); + } + ask_second_fd = -1; + } else { + assert( ask_second_fd == -1 ); + } attempt_scheduler_connect(); if (ask_fd >= 0) { @@ -1443,6 +1730,7 @@ MsgChannel *DiscoverSched::try_get_scheduler() } } else if (ask_fd >= 0) { + assert( ask_second_fd == -1 ); int status = connect(ask_fd, (struct sockaddr *) &remote_addr, sizeof(remote_addr)); if (status == 0 || (status < 0 && errno == EISCONN)) { @@ -1456,19 +1744,49 @@ MsgChannel *DiscoverSched::try_get_scheduler() return 0; } -bool DiscoverSched::broadcastData(int port, const char* buf, int len) +bool DiscoverSched::get_broad_answer(int ask_fd, int timeout, char *buf2, struct sockaddr_in *remote_addr, + socklen_t *remote_len) { - int fd = open_send_broadcast(port, buf, len); - if (fd >= 0) { - if ((-1 == close(fd)) && (errno != EBADF)){ - log_perror("close failed"); + char buf = PROTOCOL_VERSION; + fd_set read_set; + FD_ZERO(&read_set); + assert(ask_fd > 0); + FD_SET(ask_fd, &read_set); + struct timeval tv; + tv.tv_sec = timeout / 1000; + tv.tv_usec = 1000 * (timeout % 1000); + errno = 0; + + if (select(ask_fd + 1, &read_set, NULL, NULL, &tv) <= 0) { + /* Normally this is a timeout, i.e. no scheduler there. 
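// When several schedulers answer the discovery broadcast, the loop above keeps the one
// with the highest protocol version, breaking ties in favour of the scheduler that has
// been running longest (smallest start time), and rejects versions outside 1..127 as
// bogus before they can win. The comparison by itself, with names simplified for the
// sketch:
#include <ctime>

struct SchedulerCandidate {
    int version;
    time_t startTime;
};

static bool isBetterScheduler(const SchedulerCandidate &candidate,
                              const SchedulerCandidate &best)
{
    if (candidate.version >= 128 || candidate.version < 1)
        return false;                              // bogus announcement, ignore
    if (best.version == 0)
        return true;                               // nothing picked yet
    return candidate.version > best.version
        || (candidate.version == best.version
            && candidate.startTime < best.startTime);
}

int main()
{
    SchedulerCandidate best = { 0, 0 };
    SchedulerCandidate a = { 39, 2000 }, b = { 39, 1500 };
    if (isBetterScheduler(a, best)) best = a;
    if (isBetterScheduler(b, best)) best = b;      // same version, started earlier: wins
    return best.startTime == 1500 ? 0 : 1;
}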
*/ + if (errno && errno != EINTR) { + log_perror("waiting for scheduler"); } - return true; + + return false; } - return false; + + *remote_len = sizeof(struct sockaddr_in); + + int len = recvfrom(ask_fd, buf2, BROAD_BUFLEN, 0, (struct sockaddr *) remote_addr, remote_len); + if (len != BROAD_BUFLEN && len != BROAD_BUFLEN_OLD_1 && len != BROAD_BUFLEN_OLD_2) { + log_perror("get_broad_answer recvfrom()"); + return false; + } + + if (! ((len == BROAD_BUFLEN_OLD_1 && buf2[0] == buf + 1) // PROTOCOL <= 32 scheduler + || (len == BROAD_BUFLEN_OLD_2 && buf2[0] == buf + 2) // PROTOCOL >= 33 && < 36 scheduler + || (len == BROAD_BUFLEN && buf2[0] == buf + 2) // PROTOCOL >= 36 && < 38 scheduler + || (len == BROAD_BUFLEN && buf2[0] == buf + 3))) { // PROTOCOL >= 38 scheduler + log_error() << "Wrong scheduler discovery answer (size " << len << ", mark " << int(buf2[0]) << ")" << endl; + return false; + } + + buf2[len - 1] = 0; + return true; } -list get_netnames(int timeout, int port) +list DiscoverSched::getNetnames(int timeout, int port) { list l; int ask_fd; @@ -1503,6 +1821,11 @@ list get_netnames(int timeout, int port) return l; } +list get_netnames(int timeout, int port) +{ + return DiscoverSched::getNetnames(timeout, port); +} + void Msg::fill_from_channel(MsgChannel *) { } @@ -1548,6 +1871,10 @@ void GetCSMsg::fill_from_channel(MsgChannel *c) *c >> version; minimal_host_version = max( minimal_host_version, int( version )); } + + if (IS_PROTOCOL_39(c)) { + *c >> client_count; + } } void GetCSMsg::send_to_channel(MsgChannel *c) const @@ -1571,6 +1898,10 @@ void GetCSMsg::send_to_channel(MsgChannel *c) const if (IS_PROTOCOL_34(c)) { *c << minimal_host_version; } + + if (IS_PROTOCOL_39(c)) { + *c << client_count; + } } void UseCSMsg::fill_from_channel(MsgChannel *c) @@ -1790,6 +2121,9 @@ void JobBeginMsg::fill_from_channel(MsgChannel *c) Msg::fill_from_channel(c); *c >> job_id; *c >> stime; + if (IS_PROTOCOL_39(c)) { + *c >> client_count; + } } void JobBeginMsg::send_to_channel(MsgChannel *c) const @@ -1797,6 +2131,9 @@ void JobBeginMsg::send_to_channel(MsgChannel *c) const Msg::send_to_channel(c); *c << job_id; *c << stime; + if (IS_PROTOCOL_39(c)) { + *c << client_count; + } } void JobLocalBeginMsg::fill_from_channel(MsgChannel *c) @@ -1827,11 +2164,12 @@ void JobLocalDoneMsg::send_to_channel(MsgChannel *c) const *c << job_id; } -JobDoneMsg::JobDoneMsg(int id, int exit, unsigned int _flags) +JobDoneMsg::JobDoneMsg(int id, int exit, unsigned int _flags, unsigned int _client_count) : Msg(M_JOB_DONE) , exitcode(exit) , flags(_flags) , job_id(id) + , client_count(_client_count) { real_msec = 0; user_msec = 0; @@ -1859,13 +2197,25 @@ void JobDoneMsg::fill_from_channel(MsgChannel *c) *c >> out_uncompressed; *c >> flags; exitcode = (int) _exitcode; + // Older versions used this special exit code to identify + // EndJob messages for jobs with unknown job id. 
+ if (!IS_PROTOCOL_39(c) && exitcode == 200) { + flags |= UnknownJobId; + } + if (IS_PROTOCOL_39(c)) { + *c >> client_count; + } } void JobDoneMsg::send_to_channel(MsgChannel *c) const { Msg::send_to_channel(c); *c << job_id; - *c << (uint32_t) exitcode; + if (!IS_PROTOCOL_39(c) && (flags & UnknownJobId)) { + *c << (uint32_t) 200; + } else { + *c << (uint32_t) exitcode; + } *c << real_msec; *c << user_msec; *c << sys_msec; @@ -1875,9 +2225,32 @@ void JobDoneMsg::send_to_channel(MsgChannel *c) const *c << out_compressed; *c << out_uncompressed; *c << flags; + if (IS_PROTOCOL_39(c)) { + *c << client_count; + } +} + +void JobDoneMsg::set_unknown_job_client_id( uint32_t clientId ) +{ + flags |= UnknownJobId; + job_id = clientId; +} + +uint32_t JobDoneMsg::unknown_job_client_id() const +{ + if( flags & UnknownJobId ) { + return job_id; + } + return 0; +} + +void JobDoneMsg::set_job_id( uint32_t jobId ) +{ + job_id = jobId; + flags &= ~ (uint32_t) UnknownJobId; } -LoginMsg::LoginMsg(unsigned int myport, const std::string &_nodename, const std::string _host_platform) +LoginMsg::LoginMsg(unsigned int myport, const std::string &_nodename, const std::string &_host_platform) : Msg(M_LOGIN) , port(myport) , max_kids(0) diff --git a/services/comm.h b/services/comm.h index 9518e4526..ec0f36431 100644 --- a/services/comm.h +++ b/services/comm.h @@ -36,7 +36,7 @@ #include "job.h" // if you increase the PROTOCOL_VERSION, add a macro below and use that -#define PROTOCOL_VERSION 37 +#define PROTOCOL_VERSION 39 // if you increase the MIN_PROTOCOL_VERSION, comment out macros below and clean up the code #define MIN_PROTOCOL_VERSION 21 @@ -62,6 +62,13 @@ #define IS_PROTOCOL_35(c) ((c)->protocol >= 35) #define IS_PROTOCOL_36(c) ((c)->protocol >= 36) #define IS_PROTOCOL_37(c) ((c)->protocol >= 37) +#define IS_PROTOCOL_38(c) ((c)->protocol >= 38) +#define IS_PROTOCOL_39(c) ((c)->protocol >= 39) + +// Terms used: +// S = scheduler +// C = client +// CS = daemon enum MsgType { // so far unknown @@ -125,6 +132,7 @@ enum MsgType { // C --> CS, after installing an environment M_VERIFY_ENV, + // CS --> C M_VERIFY_ENV_RESULT, // C --> CS, CS --> S (forwarded from C), to not use given host for given environment M_BLACKLIST_HOST_ENV, @@ -165,7 +173,8 @@ class MsgChannel std::string dump() const; // NULL <--> channel closed or timeout - Msg *get_msg(int timeout = 10); + // Will warn in log if EOF and !eofAllowed. + Msg *get_msg(int timeout = 10, bool eofAllowed = false); // false <--> error (msg not send) bool send_msg(const Msg &, int SendFlags = SendBlocking); @@ -175,6 +184,7 @@ class MsgChannel return eof || instate == HAS_MSG; } + // Returns ture if there were no errors filling inbuf. bool read_a_bit(void); bool at_eof(void) const @@ -226,6 +236,7 @@ class MsgChannel void chop_input(void); void chop_output(void); bool wait_for_msg(int timeout); + void set_error(bool silent = false); char *msgbuf; size_t msgbuflen; @@ -240,7 +251,8 @@ class MsgChannel NEED_PROTO, NEED_LEN, FILL_BUF, - HAS_MSG + HAS_MSG, + ERROR } instate; uint32_t inmsglen; @@ -264,6 +276,21 @@ class Service static MsgChannel *createChannel(int remote_fd, struct sockaddr *, socklen_t); }; +class Broadcasts +{ +public: + // Broadcasts a message about this scheduler and its information. + static void broadcastSchedulerVersion(int scheduler_port, const char* netname, time_t starttime); + // Checks if the data received is a scheduler version broadcast. 
+ static bool isSchedulerVersion(const char* buf, int buflen); + // Reads data from a scheduler version broadcast. + static void getSchedulerVersionData( const char* buf, int* protocol, time_t* time, std::string* netname ); + static const int BROAD_BUFLEN = 268; +private: + /// Broadcasts the given data on the given port. + static void broadcastData(int port, const char* buf, int size); +}; + // -------------------------------------------------------------------------- // this class is also used by icecream-monitor class DiscoverSched @@ -316,8 +343,15 @@ class DiscoverSched return netname; } - /// Broadcasts the given data on the given port. - static bool broadcastData(int port, const char* buf, int size); + /* Return a list of all reachable netnames. We wait max. WAITTIME + milliseconds for answers. */ + static std::list<std::string> getNetnames(int waittime = 2000, int port = 8765); + + // Checks if the data is from a scheduler discovery broadcast, returns version of the sending + // daemon if yes. + static bool isSchedulerDiscovery(const char* buf, int buflen, int* daemon_version); + // Prepares data for sending a reply to a scheduler discovery broadcast. + static int prepareBroadcastReply(char* buf, const char* netname, time_t starttime); private: struct sockaddr_in remote_addr; std::string netname; std::string schedname; int timeout; int ask_fd; + int ask_second_fd; // for debugging time_t time0; unsigned int sport; int best_version; time_t best_start_time; + std::string best_schedname; + int best_port; bool multiple; void attempt_scheduler_connect(); + void sendSchedulerDiscovery( int version ); + static bool get_broad_answer(int ask_fd, int timeout, char *buf2, struct sockaddr_in *remote_addr, + socklen_t *remote_len); + static void get_broad_data(const char* buf, const char** name, int* version, time_t* start_time); }; // -------------------------------------------------------------------------- @@ -360,12 +401,14 @@ class GetCSMsg : public Msg : Msg(M_GET_CS) , count(1) , arg_flags(0) - , client_id(0) {} + , client_id(0) + , client_count(0) {} GetCSMsg(const Environments &envs, const std::string &f, CompileJob::Language _lang, unsigned int _count, std::string _target, unsigned int _arg_flags, - const std::string &host, int _minimal_host_version) + const std::string &host, int _minimal_host_version, + unsigned int _client_count = 0) : Msg(M_GET_CS) , versions(envs) , filename(f) @@ -375,7 +418,8 @@ class GetCSMsg : public Msg , arg_flags(_arg_flags) , client_id(0) , preferred_host(host) - , minimal_host_version(_minimal_host_version) {} + , minimal_host_version(_minimal_host_version) + , client_count(_client_count) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; @@ -389,6 +433,7 @@ class GetCSMsg : public Msg uint32_t client_id; std::string preferred_host; int minimal_host_version; + uint32_t client_count; // number of CS -> C connections at the moment }; class UseCSMsg : public Msg @@ -549,22 +594,21 @@ class JobBeginMsg : public Msg { public: JobBeginMsg() - : Msg(M_JOB_BEGIN) {} + : Msg(M_JOB_BEGIN) + , client_count(0) {} - JobBeginMsg(unsigned int j) + JobBeginMsg(unsigned int j, unsigned int _client_count) : Msg(M_JOB_BEGIN) , job_id(j) - , stime(time(0)) {} + , stime(time(0)) + , client_count(_client_count) {} virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; uint32_t job_id; uint32_t stime; -}; - -enum SpecialExits { - CLIENT_WAS_WAITING_FOR_CS = 200 + uint32_t
client_count; // number of CS -> C connections at the moment }; class JobDoneMsg : public Msg @@ -579,7 +623,13 @@ class JobDoneMsg : public Msg FROM_SUBMITTER = 1 }; - JobDoneMsg(int job_id = 0, int exitcode = -1, unsigned int flags = FROM_SERVER); + // other flags + enum { + UnknownJobId = (1 << 1) + }; + + JobDoneMsg(int job_id = 0, int exitcode = -1, unsigned int flags = FROM_SERVER, + unsigned int _client_count = 0); void set_from(from_type from) { @@ -591,6 +641,10 @@ class JobDoneMsg : public Msg return (flags & FROM_SUBMITTER) == 0; } + void set_unknown_job_client_id( uint32_t clientId ); + uint32_t unknown_job_client_id() const; + void set_job_id( uint32_t jobId ); + virtual void fill_from_channel(MsgChannel *c); virtual void send_to_channel(MsgChannel *c) const; @@ -609,6 +663,7 @@ class JobDoneMsg : public Msg uint32_t out_uncompressed; uint32_t job_id; + uint32_t client_count; // number of CS -> C connections at the moment }; class JobLocalBeginMsg : public Msg @@ -644,7 +699,7 @@ class JobLocalDoneMsg : public Msg class LoginMsg : public Msg { public: - LoginMsg(unsigned int myport, const std::string &_nodename, const std::string _host_platform); + LoginMsg(unsigned int myport, const std::string &_nodename, const std::string &_host_platform); LoginMsg() : Msg(M_LOGIN) , port(0) {} @@ -681,8 +736,9 @@ class StatsMsg : public Msg public: StatsMsg() : Msg(M_STATS) + , load(0) + , client_count(0) { - load = 0; } virtual void fill_from_channel(MsgChannel *c); @@ -703,6 +759,8 @@ class StatsMsg : public Msg uint32_t loadAvg5; uint32_t loadAvg10; uint32_t freeMem; + + uint32_t client_count; // number of CS -> C connections at the moment }; class EnvTransferMsg : public Msg @@ -751,7 +809,7 @@ class MonGetCSMsg : public GetCSMsg } MonGetCSMsg(int jobid, int hostid, GetCSMsg *m) - : GetCSMsg(Environments(), m->filename, m->lang, 1, m->target, 0, std::string(), false) + : GetCSMsg(Environments(), m->filename, m->lang, 1, m->target, 0, std::string(), false, m->client_count) , job_id(jobid) , clientid(hostid) { diff --git a/services/job.h b/services/job.h index eab666697..330765801 100644 --- a/services/job.h +++ b/services/job.h @@ -29,10 +29,9 @@ #include typedef enum { - Arg_Unspecified, - Arg_Local, - Arg_Remote, - Arg_Rest + Arg_Local, // Local-only args. + Arg_Remote, // Remote-only args. + Arg_Rest // Args to use both locally and remotely. } Argument_Type; class ArgumentsList : public std::list > @@ -50,6 +49,7 @@ class CompileJob Lang_C, Lang_CXX, Lang_OBJC, + Lang_OBJCXX, Lang_Custom } Language; @@ -65,6 +65,7 @@ class CompileJob CompileJob() : m_id(0) , m_dwarf_fission(false) + , m_block_rewrite_includes(false) { setTargetPlatform(); } @@ -89,11 +90,14 @@ class CompileJob return m_language; } + // Not used remotely. void setCompilerPathname(const std::string& pathname) { m_compiler_pathname = pathname; } + // Not used remotely. + // Use find_compiler(), as this may be empty. std::string compilerPathname() const { return m_compiler_pathname; @@ -185,6 +189,18 @@ class CompileJob m_target_platform = _target; } + // Not used remotely. + void setBlockRewriteIncludes(bool flag) + { + m_block_rewrite_includes = flag; + } + + // Not used remotely. 
+ bool blockRewriteIncludes() const + { + return m_block_rewrite_includes; + } + private: std::list flags(Argument_Type argumentType) const; void setTargetPlatform(); @@ -199,6 +215,7 @@ class CompileJob std::string m_working_directory; std::string m_target_platform; bool m_dwarf_fission; + bool m_block_rewrite_includes; }; inline void appendList(std::list &list, const std::list &toadd) @@ -223,6 +240,9 @@ inline std::ostream &operator<<( std::ostream &output, case CompileJob::Lang_OBJC: output << "ObjC"; break; + case CompileJob::Lang_OBJCXX: + output << "ObjC++"; + break; } return output; } diff --git a/services/logging.cpp b/services/logging.cpp index 2a4a38219..332ae6658 100644 --- a/services/logging.cpp +++ b/services/logging.cpp @@ -26,28 +26,72 @@ #include #include #include +#include #ifdef __linux__ #include #endif using namespace std; -int debug_level = 0; +int debug_level = Error; ostream *logfile_trace = 0; ostream *logfile_info = 0; ostream *logfile_warning = 0; ostream *logfile_error = 0; string logfile_prefix; +volatile sig_atomic_t reset_debug_needed = 0; static ofstream logfile_null("/dev/null"); static ofstream logfile_file; static string logfile_filename; -void reset_debug(int); +static void reset_debug_signal_handler(int); + +// Implementation of an iostream helper that allows redirecting output to a given file descriptor. +// This seems to be the only portable way to do it. +namespace +{ +class ofdbuf : public streambuf +{ +public: + explicit ofdbuf( int fd ) : fd( fd ) {} + virtual int_type overflow( int_type c ); + virtual streamsize xsputn( const char* c, streamsize n ); +private: + int fd; +}; + +ofdbuf::int_type ofdbuf::overflow( int_type c ) +{ + if( c != EOF ) { + char cc = c; + if( write( fd, &cc, 1 ) != 1 ) + return EOF; + } + return c; +} + +streamsize ofdbuf::xsputn( const char* c, streamsize n ) +{ + return write( fd, c, n ); +} + +ostream* ccache_stream( int fd ) +{ + int status = fcntl( fd, F_GETFL ); + if( status < 0 || ( status & ( O_WRONLY | O_RDWR )) == 0 ) { + // As logging is not set up yet, this will log to stderr. 
+ log_warning() << "UNCACHED_ERR_FD provides an invalid file descriptor, using stderr" << endl; + return &cerr; // fd is not valid fd for writting + } + static ofdbuf buf( fd ); + static ostream stream( &buf ); + return &stream; +} +} // namespace void setup_debug(int level, const string &filename, const string &prefix) { - string fname = filename; debug_level = level; logfile_prefix = prefix; logfile_filename = filename; @@ -63,6 +107,7 @@ void setup_debug(int level, const string &filename, const string &prefix) logfile_file.open(filename.c_str(), fstream::out | fstream::app); #ifdef __linux__ + string fname = filename; if (fname[0] != '/') { char buf[PATH_MAX]; @@ -75,6 +120,8 @@ void setup_debug(int level, const string &filename, const string &prefix) setenv("SEGFAULT_OUTPUT_NAME", fname.c_str(), false); #endif output = &logfile_file; + } else if( const char* ccache_err_fd = getenv( "UNCACHED_ERR_FD" )) { + output = ccache_stream( atoi( ccache_err_fd )); } else { output = &cerr; } @@ -83,38 +130,73 @@ void setup_debug(int level, const string &filename, const string &prefix) (void) dlopen("libSegFault.so", RTLD_NOW | RTLD_LOCAL); #endif - if (debug_level & Debug) { + if (debug_level >= Debug) { logfile_trace = output; } else { logfile_trace = &logfile_null; } - if (debug_level & Info) { + if (debug_level >= Info) { logfile_info = output; } else { logfile_info = &logfile_null; } - if (debug_level & Warning) { + if (debug_level >= Warning) { logfile_warning = output; } else { logfile_warning = &logfile_null; } - if (debug_level & Error) { + if (debug_level >= Error) { logfile_error = output; } else { logfile_error = &logfile_null; } - signal(SIGHUP, reset_debug); + signal(SIGHUP, reset_debug_signal_handler); } -void reset_debug(int) +void reset_debug() { setup_debug(debug_level, logfile_filename); } +void reset_debug_signal_handler(int) +{ + reset_debug_needed = 1; +} + +void reset_debug_if_needed() +{ + if( reset_debug_needed ) { + reset_debug_needed = 0; + reset_debug(); + if( const char* env = getenv( "ICECC_TEST_FLUSH_LOG_MARK" )) { + ifstream markfile( env ); + string mark; + getline( markfile, mark ); + if( !mark.empty()) { + assert( logfile_trace != NULL ); + *logfile_trace << "flush log mark: " << mark << endl; + } + } + if( const char* env = getenv( "ICECC_TEST_LOG_HEADER" )) { + ifstream markfile( env ); + string header1, header2, header3; + getline( markfile, header1 ); + getline( markfile, header2 ); + getline( markfile, header3 ); + if( !header1.empty()) { + assert( logfile_trace != NULL ); + *logfile_trace << header1 << endl; + *logfile_trace << header2 << endl; + *logfile_trace << header3 << endl; + } + } + } +} + void close_debug() { if (logfile_null.is_open()) { diff --git a/services/logging.h b/services/logging.h index 8250cc0bc..d7569817c 100644 --- a/services/logging.h +++ b/services/logging.h @@ -32,11 +32,14 @@ #include #include -enum DebugLevels { - Info = 1, - Warning = 2, - Error = 4, - Debug = 8 +// Verbosity level, from least to most. 
+enum VerbosityLevel { + Error = 0, + Warning = 1, + Info = 2, + Debug = 3, + + MaxVerboseLevel = Debug }; extern std::ostream *logfile_info; @@ -46,7 +49,8 @@ extern std::ostream *logfile_trace; extern std::string logfile_prefix; void setup_debug(int level, const std::string &logfile = "", const std::string &prefix = ""); -void reset_debug(int); +void reset_debug_if_needed(); // if we get SIGHUP, this will handle the reset +void reset_debug(); void close_debug(); void flush_debug(); @@ -55,7 +59,7 @@ static inline std::ostream &output_date(std::ostream &os) time_t t = time(0); struct tm *tmp = localtime(&t); char buf[64]; - strftime(buf, sizeof(buf), "%T: ", tmp); + strftime(buf, sizeof(buf), "%Y-%m-%d %T: ", tmp); if (logfile_prefix.size()) { os << logfile_prefix; @@ -106,7 +110,7 @@ static inline std::ostream &trace() static inline std::ostream & log_errno(const char *prefix, int tmp_errno) { - return log_error() << prefix << " " << strerror(tmp_errno) << std::endl; + return log_error() << prefix << "(Error: " << strerror(tmp_errno) << ")" << std::endl; } static inline std::ostream & log_perror(const char *prefix) @@ -114,6 +118,16 @@ static inline std::ostream & log_perror(const char *prefix) return log_errno(prefix, errno); } +static inline std::ostream & log_errno_trace(const char *prefix, int tmp_errno) +{ + return trace() << prefix << "(Error: " << strerror(tmp_errno) << ")" << std::endl; +} + +static inline std::ostream & log_perror_trace(const char *prefix) +{ + return log_errno_trace(prefix, errno); +} + class log_block { static unsigned nesting; diff --git a/suse/icecc-scheduler.xml b/suse/icecc-scheduler.xml new file mode 100644 index 000000000..186d6f7c1 --- /dev/null +++ b/suse/icecc-scheduler.xml @@ -0,0 +1,8 @@ + + + icecream scheduler + Icecream distributed compilation scheduler. + + + + diff --git a/suse/iceccd.xml b/suse/iceccd.xml new file mode 100644 index 000000000..067037e52 --- /dev/null +++ b/suse/iceccd.xml @@ -0,0 +1,6 @@ + + + icecream daemon + Icecream distributed compilation scheduler. + + diff --git a/tests/Makefile.am b/tests/Makefile.am index cfd26d0a1..4ef3a655a 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,37 +1,26 @@ +# By default be lenient and don't fail if some tests are skipped. +# Strict mode will fail in such case. + test: test-full +test-strict: + $(MAKE) test STRICT=1 test-prepare: - if test -x /sbin/setcap; then \ - sudo /sbin/setcap cap_sys_chroot+ep ${builddir}/../daemon/iceccd cap_sys_chroot+ep ${sbindir}/iceccd ; \ - elif which filecap >/dev/null 2>/dev/null; then \ - sudo filecap ${builddir}/../daemon/iceccd sys_chroot ; \ + if test -n "$(VALGRIND)"; then \ + true; \ + elif test -x /sbin/setcap; then \ + sudo /sbin/setcap cap_sys_chroot+ep ${sbindir}/iceccd ; \ + elif command -v filecap >/dev/null 2>/dev/null; then \ sudo filecap ${sbindir}/iceccd sys_chroot ; \ else \ - exit 1 ; \ + true ; \ fi test-full: test-prepare $(MAKE) test-run -test-run: - results=`realpath -s ${builddir}/results` && builddir2=`realpath -s ${builddir}` && cd ${srcdir} && ./test.sh ${prefix} $$results --builddir=$$builddir2 - -# Automake's conditionals are dumb and adding 'test-run: clangplugin' would make it warn about -# being defined in two contexts, even though in this context it's harmless and intended. -test-run: @HAVE_CLANG_DEVEL_DEP@ - -clangplugin: ${builddir}/clangplugin.so - -# It appears there's no way to force libtool to create a shared library if AC_DISABLE_SHARED is used -# in configure (and it is, as icecream uses static libs). 
So just do it manually. If this matters -for anybody, feel free to do this in a better way. -${builddir}/clangplugin.so: clangplugin.cpp - $(CXX) -shared -fPIC -o $@ $^ -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -fno-rtti - -clean: clean-clangplugin - -clean-clangplugin: - rm -f ${builddir}/clangplugin.so +test-run: test-setup.sh + results=`realpath -s ${builddir}/results` && builddir2=`realpath -s ${builddir}` && cd ${srcdir} && /bin/bash test.sh ${prefix} $$results --builddir=$$builddir2 --strict=$(STRICT) --valgrind=$(VALGRIND) TESTS = testargs @@ -40,3 +29,5 @@ testargs_LDADD = ../client/libclient.a ../services/libicecc.la $(LIBRSYNC) check_PROGRAMS = testargs testargs_SOURCES = args.cpp + +check_SCRIPTS = test.sh test-setup.sh diff --git a/tests/Makefile.test b/tests/Makefile.test index 1196b5a3f..4ff5a86b2 100644 --- a/tests/Makefile.test +++ b/tests/Makefile.test @@ -5,10 +5,10 @@ OBJS = $(patsubst %.cpp,$(OUTDIR)/%.o,$(SOURCES)) maketest: $(OUTDIR)/maketest $(OUTDIR)/maketest: $(OBJS) - g++ -o $@ $^ + $(CXX) -o $@ $^ $(OUTDIR)/%.o: %.cpp - g++ -o $@ -c $^ + $(CXX) -o $@ -c $^ clean: rm -f $(OBJS) $(OUTDIR)/maketest diff --git a/tests/README b/tests/README index 026fa5b56..b9b9a2c3b 100644 --- a/tests/README +++ b/tests/README @@ -14,7 +14,8 @@ install Icecream for the tests (really install, no $DESTDIR). It is however not necessary to do a system install, simply install somewhere. If you want to test also remote builds (recommended), it is necessary for the install to have the capability to chroot. This is most easily done by giving the CAP_SYS_CHROOT -Linux capability to the iceccd binary. +Linux capability to the iceccd binary (which "make test" does automatically +using sudo). An example of building Icecream and testing it: @@ -44,7 +45,7 @@ files {daemon}_all.log include complete logs (up to but not including the failed Exit status of test.sh is 0 for all tests passing, 1 for all tests passing but some being skipped and 2 for errors. -If you want to use icemon for the tests, use 'USE_SCHEDULER=localhost:8767 icemon'. Note that +If you want to use icemon for the tests, use 'ICECC_SCHEDULER=localhost:8767 icemon'. Note that icemon needs to be built with recent (Feb 2014) libicecc library for this to work. @@ -52,6 +53,10 @@ Valgrind: ========= It is possible to pass --valgrind to test.sh in order to test (almost) everything with Valgrind. +Valgrind may also be invoked automatically using: + + make test VALGRIND=1 + Note that in order to be able to chroot, it is necessary to give the capability to the actual Valgrind binary instead of iceccd (this is because this binary is what actually runs as the process): @@ -62,9 +67,6 @@ Do not forget to reset it back when done: sudo /sbin/setcap -r /usr/lib/valgrind/memcheck-x86-linux -If anything in the test exits with exit code 10, it is very likely because Valgrind detected -an error. Check valgrind logs in the log directory.
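A complete Valgrind run could therefore look like this (the memcheck binary path
depends on your platform and Valgrind version, so adjust it to your installation):

  sudo /sbin/setcap cap_sys_chroot+ep /usr/lib/valgrind/memcheck-x86-linux
  make test VALGRIND=1
  sudo /sbin/setcap -r /usr/lib/valgrind/memcheck-x86-linux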
- Adding new tests: ================= diff --git a/tests/args.cpp b/tests/args.cpp index e34680f7c..dd74be1a9 100644 --- a/tests/args.cpp +++ b/tests/args.cpp @@ -18,7 +18,7 @@ void restore_icecc_color_diagnostics() { unsetenv("ICECC_COLOR_DIAGNOSTICS"); } -void test_run(const string &prefix, const char * const *argv, bool icerun, const string expected) { +void test_run(const string &prefix, const char * const *argv, bool icerun, const string& expected) { list extrafiles; CompileJob job; bool local = analyse_argv(argv, job, icerun, &extrafiles); diff --git a/tests/clangplugin.cpp b/tests/clangplugin.cpp index ab6097bd7..8bdfe6e0c 100644 --- a/tests/clangplugin.cpp +++ b/tests/clangplugin.cpp @@ -8,6 +8,7 @@ * */ +#include #include #include #include @@ -29,7 +30,11 @@ class Action : public PluginASTAction { public: +#if (CLANG_VERSION_MAJOR == 3 && CLANG_VERSION_MINOR >= 6) || CLANG_VERSION_MAJOR > 3 + virtual std::unique_ptr CreateASTConsumer( CompilerInstance& compiler, StringRef infile ); +#else virtual ASTConsumer* CreateASTConsumer( CompilerInstance& compiler, StringRef infile ); +#endif virtual bool ParseArgs( const CompilerInstance& compiler, const vector< string >& args ); private: vector< string > _args; @@ -47,12 +52,19 @@ class Consumer }; +#if (CLANG_VERSION_MAJOR == 3 && CLANG_VERSION_MINOR >= 6) || CLANG_VERSION_MAJOR > 3 +std::unique_ptr Action::CreateASTConsumer( CompilerInstance& compiler, StringRef ) + { + return unique_ptr( new Consumer( compiler, _args )); + } +#else ASTConsumer* Action::CreateASTConsumer( CompilerInstance& compiler, StringRef ) { return new Consumer( compiler, _args ); } +#endif -bool Action::ParseArgs( const CompilerInstance& compiler, const vector< string >& args ) +bool Action::ParseArgs( const CompilerInstance& /*compiler*/, const vector< string >& args ) { _args = args; return true; @@ -106,7 +118,7 @@ bool Consumer::VisitReturnStmt( const ReturnStmt* returnstmt ) void report( const CompilerInstance& compiler, DiagnosticsEngine::Level level, const char* txt, SourceLocation loc ) { DiagnosticsEngine& engine = compiler.getDiagnostics(); -#if (__clang_major__ == 3 && __clang_minor__ >= 5) || __clang_major__ > 3 +#if (CLANG_VERSION_MAJOR == 3 && CLANG_VERSION_MINOR >= 5) || CLANG_VERSION_MAJOR > 3 if( loc.isValid()) engine.Report( loc, engine.getDiagnosticIDs()->getCustomDiagID( static_cast< DiagnosticIDs::Level >( level ), txt )); diff --git a/tests/fsanitize-blacklist.txt b/tests/fsanitize-blacklist.txt new file mode 100644 index 000000000..447cf7846 --- /dev/null +++ b/tests/fsanitize-blacklist.txt @@ -0,0 +1 @@ +fun:*test_fsanitize_function* diff --git a/tests/fsanitize.cpp b/tests/fsanitize.cpp new file mode 100644 index 000000000..ec478eb4f --- /dev/null +++ b/tests/fsanitize.cpp @@ -0,0 +1,13 @@ +void test_fsanitize_function() + { + int* arr = new int[10]; + delete[] arr; + int r = arr[ 0 ]; + (void)r; + } + +int main() + { + test_fsanitize_function(); + return 0; + } diff --git a/tests/icerun-test.sh b/tests/icerun-test.sh index 895c1ea82..35f5ce264 100755 --- a/tests/icerun-test.sh +++ b/tests/icerun-test.sh @@ -5,7 +5,11 @@ num="$2" test -z "$dir" -o -z "$num" && exit 1 touch "$dir"/running$num -sleep 0.2 +if test -z "$ICERUN_TEST_VALGRIND"; then + sleep 0.2 +else + sleep 1 +fi rm "$dir"/running$num touch "$dir"/done$num exit 0 diff --git a/tests/includes-without.cpp b/tests/includes-without.cpp new file mode 100644 index 000000000..a0a0671b3 --- /dev/null +++ b/tests/includes-without.cpp @@ -0,0 +1,7 @@ +// #include "includes.h" - will be 
done using -include includes.h +#include + +void f() + { + std::cout << std::endl; // use something included only by includes.h + } diff --git a/tests/includes.cpp b/tests/includes.cpp index 6c1afb20f..b7469e1c0 100644 --- a/tests/includes.cpp +++ b/tests/includes.cpp @@ -3,4 +3,5 @@ void f() { + std::cout << std::endl; // use something included only by includes.h } diff --git a/tests/includes.h b/tests/includes.h index 8c9e5d66a..2328296b2 100644 --- a/tests/includes.h +++ b/tests/includes.h @@ -1,7 +1,7 @@ #ifndef INCLUDES_H #define INCLUDES_H -#include +#include #include #endif diff --git a/tests/recursive_clang++ b/tests/recursive_clang++ new file mode 100755 index 000000000..b2fb435bb --- /dev/null +++ b/tests/recursive_clang++ @@ -0,0 +1,5 @@ +#! /bin/sh + +# Indirectly invoke icecc again. + +exec icecc clang++ "$@" diff --git a/tests/test-setup.sh.in b/tests/test-setup.sh.in new file mode 100644 index 000000000..7c377b8e6 --- /dev/null +++ b/tests/test-setup.sh.in @@ -0,0 +1,4 @@ +# Sourced by test.sh , not to be used directly. + +# Needed for locating our compiler wrapper symlinks. +pkglibexecdir=@PKGLIBEXECDIR@ diff --git a/tests/test.sh b/tests/test.sh index 9bbb69b81..17ad82f91 100755 --- a/tests/test.sh +++ b/tests/test.sh @@ -5,26 +5,52 @@ testdir="$2" shift shift valgrind= -builddir= +builddir=. +strict= usage() { - echo Usage: "$0 [--builddir=dir] [--valgrind[=command]]" + echo Usage: "$0 [--builddir=dir] [--valgrind[=command]] [--strict[=value]]" exit 3 } +get_default_valgrind_flags() +{ + default_valgrind_args="--num-callers=50 --suppressions=valgrind_suppressions --log-file=$testdir/valgrind-%p.log" + # Check if valgrind knows --error-markers, which makes it simpler to find out if log contains any error. + valgrind_error_markers="--error-markers=ICEERRORBEGIN,ICEERROREND" + valgrind $valgrind_error_markers true 2>/dev/null + if test $? -eq 0; then + default_valgrind_args="$default_valgrind_args $valgrind_error_markers" + else + valgrind_error_markers= + fi +} + while test -n "$1"; do case "$1" in - --valgrind) - valgrind="valgrind --leak-check=no --error-exitcode=10 --suppressions=valgrind_suppressions --log-file=$testdir/valgrind-%p.log --" - rm -f "$testdir"/valgrind-*.log + --valgrind|--valgrind=1) + get_default_valgrind_flags + valgrind="valgrind --leak-check=no $default_valgrind_args --" + ;; + --valgrind=) + # when invoked from Makefile, no valgrind ;; --valgrind=*) - valgrind="`echo $1 | sed 's/^--valgrind=//'` --error-exitcode=10 --suppressions=valgrind_suppressions --log-file=$testdir/valgrind-%p.log --" - rm -f "$testdir"/valgrind-*.log + get_default_valgrind_flags + valgrind="${1#--valgrind=} $default_valgrind_args --" ;; --builddir=*) - builddir=`echo $1 | sed 's/^--builddir=//'` + builddir="${1#--builddir=}" + ;; + --strict) + strict=1 + ;; + --strict=*) + strict="${1#--strict=}" + if test "$strict" = "0"; then + strict= + fi ;; *) usage @@ -33,14 +59,21 @@ while test -n "$1"; do shift done +. $builddir/test-setup.sh +if test $? -ne 0; then + echo Error sourcing test-setup.sh file, aborting.
+ exit 4 +fi + icecc="${prefix}/bin/icecc" iceccd="${prefix}/sbin/iceccd" icecc_scheduler="${prefix}/sbin/icecc-scheduler" -if [[ -n "${builddir}" ]]; then - icecc="${builddir}/../client/icecc" - iceccd="${builddir}/../daemon/iceccd" - icecc_scheduler="${builddir}/../scheduler/icecc-scheduler" -fi +icecc_create_env="${prefix}/bin/icecc-create-env" +icecc_test_env="${prefix}/bin/icecc-test-env" +icerun="${prefix}/bin/icerun" +wrapperdir="${pkglibexecdir}/bin" +netname="icecctestnetname$$" +protocolversion=$(grep '#define PROTOCOL_VERSION ' ../services/comm.h | sed 's/#define PROTOCOL_VERSION //') if test -z "$prefix" -o ! -x "$icecc"; then usage @@ -62,35 +95,78 @@ unset ICECC_REPEAT_RATE unset ICECC_PREFERRED_HOST unset ICECC_CC unset ICECC_CXX +unset ICECC_REMOTE_CPP unset ICECC_CLANG_REMOTE_CPP unset ICECC_IGNORE_UNVERIFIED unset ICECC_EXTRAFILES unset ICECC_COLOR_DIAGNOSTICS unset ICECC_CARET_WORKAROUND -GCC=/usr/bin/gcc -GXX=/usr/bin/g++ -CLANG=/usr/bin/clang -CLANGXX=/usr/bin/clang++ - mkdir -p "$testdir" skipped_tests= chroot_disabled= -debug_fission_disabled= -$GXX -E -gsplit-dwarf messages.cpp 2>/dev/null >/dev/null || debug_fission_disabled=1 -if test -n "$debug_fission_disabled"; then - skipped_tests="$skipped_tests split-dwarf(g++)" -fi +flush_log_mark=1 +last_reset_log_mark= +last_section_log_mark= -abort_tests() +check_compilers() { - for logfile in "$testdir"/*.log*; do - echo "Log file: ${logfile}" - cat ${logfile} - done + if test -z "$TESTCC"; then + if cc -v >/dev/null 2>/dev/null; then + TESTCC=/usr/bin/cc + elif gcc -v >/dev/null 2>/dev/null; then + TESTCC=/usr/bin/gcc + elif clang -v >/dev/null 2>/dev/null; then + TESTCC=/usr/bin/clang + else + echo Cannot find gcc or clang, explicitly set TESTCC. + exit 5 + fi + fi + if test -z "$TESTCXX"; then + if c++ -v >/dev/null 2>/dev/null; then + TESTCXX=/usr/bin/c++ + elif g++ -v >/dev/null 2>/dev/null; then + TESTCXX=/usr/bin/g++ + elif clang -v >/dev/null 2>/dev/null; then + TESTCXX=/usr/bin/clang++ + else + echo Cannot find g++ or clang++, explicitly set TESTCXX. + exit 5 + fi + fi + using_gcc= + if $TESTCC -v 2>&1 | grep ^gcc >/dev/null; then + using_gcc=1 + fi + using_clang= + if $TESTCC --version | grep clang >/dev/null; then + using_clang=1 + fi + echo Using C compiler: $TESTCC + $TESTCC --version + if test $? -ne 0; then + echo Compiler $TESTCC failed. + exit 5 + fi + echo Using C++ compiler: $TESTCXX + $TESTCXX --version + if test $? -ne 0; then + echo Compiler $TESTCXX failed. + exit 5 + fi + if test -z "$using_gcc" -a -z "$using_clang"; then + echo "Unknown compiler type (neither GCC nor Clang), aborting." + exit 5 + fi + echo +} +abort_tests() +{ + dump_logs exit 2 } @@ -98,27 +174,14 @@ start_iceccd() { name=$1 shift - ICECC_TEST_SOCKET="$testdir"/socket-${name} $valgrind "${iceccd}" -s localhost:8767 -b "$testdir"/envs-${name} -l "$testdir"/${name}.log -N ${name} -v -v -v "$@" 2>>"$testdir"/iceccdstderr_${name}.log & + ICECC_TEST_SOCKET="$testdir"/socket-${name} ICECC_SCHEDULER=:8767 ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 \ + ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ + $valgrind "${iceccd}" -b "$testdir"/envs-${name} -l "$testdir"/${name}.log -n ${netname} -N ${name} -v -v -v "$@" & pid=$! 
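    # Note on the ICECC_TEST_* variables passed above: ICECC_TEST_FLUSH_LOG_MARK and
    # ICECC_TEST_LOG_HEADER name files whose contents the daemon's logging code copies into
    # its log whenever the log is flushed via SIGHUP (see reset_debug_if_needed() in
    # services/logging.cpp); the tests use that to mark and slice log sections.
    # ICECC_TEST_SCHEDULER_PORTS appears to restrict the daemon to the test scheduler
    # ports 8767-8769, so a production scheduler on the default port is not picked up.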
- wait_for_proc_sleep 10 ${pid} eval ${name}_pid=${pid} echo ${pid} > "$testdir"/${name}.pid } -wait_for_proc_sleep() -{ - local wait_timeout=$1 - shift - local pid_list="$@" - local proc_count=$# - local ps_state_field="state" - for wait_count in $(seq 1 ${wait_timeout}); do - local int_sleep_count=$(ps -ho ${ps_state_field} -p ${pid_list} | grep --count "S") - ((${int_sleep_count} == ${proc_count})) && break - sleep 1 - done -} - kill_daemon() { daemon=$1 @@ -144,7 +207,9 @@ kill_daemon() start_ice() { - $valgrind "${icecc_scheduler}" -p 8767 -l "$testdir"/scheduler.log -v -v -v & + ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 \ + ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ + $valgrind "${icecc_scheduler}" -p 8767 -l "$testdir"/scheduler.log -n ${netname} -v -v -v & scheduler_pid=$! echo $scheduler_pid > "$testdir"/scheduler.pid @@ -152,46 +217,10 @@ start_ice() start_iceccd remoteice1 -p 10246 -m 2 start_iceccd remoteice2 -p 10247 -m 2 - notready= - if test -n "$valgrind"; then - sleep 10 - else - sleep 1 - fi - for time in `seq 1 10`; do - notready= - if ! kill -0 $scheduler_pid; then - echo Scheduler start failure. - stop_ice 0 - abort_tests - fi - for daemon in localice remoteice1 remoteice2; do - pid=${daemon}_pid - if ! kill -0 ${!pid}; then - echo Daemon $daemon start failure. - stop_ice 0 - abort_tests - fi - if ! grep -q "Connected to scheduler" "$testdir"/${daemon}.log; then - # ensure log file flush - kill -HUP ${!pid} - grep -q "Connected to scheduler" "$testdir"/${daemon}.log || notready=1 - fi - done - if test -z "$notready"; then - break; - fi - sleep 1 - done - if test -n "$notready"; then - echo Icecream not ready, aborting. - stop_ice 0 - abort_tests - fi - sleep 5 # Give the scheduler time to get everything set up + wait_for_ice_startup_complete scheduler localice remoteice1 remoteice2 flush_logs - grep -q "Cannot use chroot, no remote jobs accepted." "$testdir"/remoteice1.log && chroot_disabled=1 - grep -q "Cannot use chroot, no remote jobs accepted." "$testdir"/remoteice2.log && chroot_disabled=1 + cat_log_last_mark remoteice1 | grep -q "Cannot use chroot, no remote jobs accepted." && chroot_disabled=1 + cat_log_last_mark remoteice2 | grep -q "Cannot use chroot, no remote jobs accepted." && chroot_disabled=1 if test -n "$chroot_disabled"; then skipped_tests="$skipped_tests CHROOT" echo Chroot not available, remote tests will be skipped. @@ -201,25 +230,12 @@ start_ice() # start only local daemon, no scheduler start_only_daemon() { - ICECC_TEST_SOCKET="$testdir"/socket-localice $valgrind "${iceccd}" --no-remote -s localhost:8767 -b "$testdir"/envs-localice -l "$testdir"/localice.log -N localice -m 2 -v -v -v & + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_SCHEDULER=:8767 ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 \ + ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ + $valgrind "${iceccd}" --no-remote -b "$testdir"/envs-localice -l "$testdir"/localice.log -n ${netname} -N localice -m 2 -v -v -v & localice_pid=$! echo $localice_pid > "$testdir"/localice.pid - if test -n "$valgrind"; then - sleep 10 - else - sleep 1 - fi - if ! kill -0 $localice_pid; then - echo Daemon localice start failure. - stop_only_daemon 0 - abort_tests - fi - flush_logs - if ! grep -q "Netnames:" "$testdir"/localice.log; then - echo Daemon localice not ready, aborting. 
- stop_only_daemon 0 - abort_tests - fi + wait_for_ice_startup_complete "noscheduler" localice } stop_ice() @@ -229,16 +245,18 @@ stop_ice() # 2 - do not check, do not wait (wait would fail, started by previous shell) check_type="$1" if test $check_type -eq 2; then - scheduler_pid=`cat "$testdir"/scheduler.pid 2>/dev/null` - localice_pid=`cat "$testdir"/localice.pid 2>/dev/null` - remoteice1_pid=`cat "$testdir"/remoteice1.pid 2>/dev/null` - remoteice2_pid=`cat "$testdir"/remoteice2.pid 2>/dev/null` + scheduler_pid=$(cat "$testdir"/scheduler.pid 2>/dev/null) + localice_pid=$(cat "$testdir"/localice.pid 2>/dev/null) + remoteice1_pid=$(cat "$testdir"/remoteice1.pid 2>/dev/null) + remoteice2_pid=$(cat "$testdir"/remoteice2.pid 2>/dev/null) fi if test $check_type -eq 1; then - if ! kill -0 $scheduler_pid; then - echo Scheduler no longer running. - stop_ice 0 - abort_tests + if test -n "$scheduler_pid"; then + if ! kill -0 $scheduler_pid; then + echo Scheduler no longer running. + stop_ice 0 + abort_tests + fi fi for daemon in localice remoteice1 remoteice2; do pid=${daemon}_pid @@ -266,6 +284,38 @@ stop_ice() scheduler_pid= fi rm -f "$testdir"/scheduler.pid + stop_secondary_scheduler $check_type +} + +stop_secondary_scheduler() +{ + check_type="$1" + if test $check_type -eq 2; then + scheduler2_pid=$(cat "$testdir"/scheduler2.pid 2>/dev/null) + fi + if test $check_type -eq 1; then + if test -n "$scheduler2_pid"; then + if ! kill -0 $scheduler2_pid; then + echo Secondary scheduler no longer running. + stop_ice 0 + abort_tests + fi + fi + fi + if test -n "$scheduler2_pid"; then + kill "$scheduler2_pid" 2>/dev/null + if test $check_type -eq 1; then + wait $scheduler2_pid + exitcode=$? + if test $exitcode -ne 0; then + echo Secondary scheduler exited with code $exitcode. + stop_ice 0 + abort_tests + fi + fi + scheduler2_pid= + fi + rm -f "$testdir"/scheduler2.pid } stop_only_daemon() @@ -285,12 +335,80 @@ stop_only_daemon() localice_pid= } +wait_for_ice_startup_complete() +{ + noscheduler= + if test "$1" == "noscheduler"; then + noscheduler=1 + shift + fi + processes="$@" + timeout=10 + if test -n "$valgrind"; then + # need time to set up SIGHUP handler + sleep 5 + timeout=15 + fi + notready= + for ((i=0; i/dev/null expected_exit=$? 
fi - reset_logs local "$@" + if test -z "$noresetlogs"; then + reset_logs local "$@" + else + mark_logs local "$@" + fi echo Running: "$@" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=localice ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" "$@" 2>"$testdir"/stderr.localice @@ -328,12 +481,14 @@ run_ice() if test -n "$split_dwarf"; then mv "$split_dwarf" "$split_dwarf".localice fi - cat "$testdir"/stderr.localice >> "$testdir"/stderr.log + cat "$testdir"/stderr.localice >> "$testdir"/stderr.localice.log flush_logs - check_logs_for_generic_errors + check_logs_for_generic_errors $stderrfixforlog if test "$remote_type" = "remote"; then check_log_message icecc "building myself, but telling localhost" - check_log_error icecc "" + if test -z "$stderrfix"; then + check_log_error icecc "" + fi else check_log_message icecc "" check_log_error icecc "building myself, but telling localhost" @@ -345,7 +500,7 @@ run_ice() fi if test -z "$chroot_disabled"; then - reset_logs remote "$@" + mark_logs remote "$@" ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" "$@" 2>"$testdir"/stderr.remoteice remoteice_exit=$? if test -n "$output"; then @@ -354,12 +509,27 @@ run_ice() if test -n "$split_dwarf"; then mv "$split_dwarf" "$split_dwarf".remoteice fi - cat "$testdir"/stderr.remoteice >> "$testdir"/stderr.log + cat "$testdir"/stderr.remoteice >> "$testdir"/stderr.remoteice.log flush_logs - check_logs_for_generic_errors + check_logs_for_generic_errors $stderrfixforlog if test "$remote_type" = "remote"; then check_log_message icecc "Have to use host 127.0.0.1:10246" - check_log_error icecc "" + if test -z "$stderrfix"; then + check_log_error icecc "" + fi + if test -n "$output"; then + check_log_message remoteice1 "Remote compilation completed with exit code 0" + check_log_error remoteice1 "Remote compilation aborted with exit code" + check_log_error remoteice1 "Remote compilation exited with exit code" + elif test -n "$remoteabort"; then + check_log_message remoteice1 "Remote compilation aborted with exit code" + check_log_error remoteice1 "Remote compilation completed with exit code 0" + check_log_error remoteice1 "Remote compilation exited with exit code" + else + check_log_message remoteice1 "Remote compilation exited with exit code $expected_exit" + check_log_error remoteice1 "Remote compilation completed with exit code 0" + check_log_error remoteice1 "Remote compilation aborted with exit code" + fi else check_log_message icecc "" check_log_error icecc "Have to use host 127.0.0.1:10246" @@ -368,15 +538,17 @@ run_ice() check_log_error icecc "building myself, but telling localhost" if test -z "$stderrfix"; then check_log_error icecc "local build forced" + else + check_log_message icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" fi fi - reset_logs noice "$@" + mark_logs noice "$@" "$@" 2>"$testdir"/stderr normal_exit=$? cat "$testdir"/stderr >> "$testdir"/stderr.log flush_logs - check_logs_for_generic_errors + check_logs_for_generic_errors $stderrfixforlog check_log_error icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "" @@ -398,17 +570,39 @@ run_ice() stop_ice 0 abort_tests fi - if ! 
diff -q "$testdir"/stderr.localice "$testdir"/stderr; then - echo "Stderr mismatch ($"testdir"/stderr.localice)" - stop_ice 0 - abort_tests - fi - if test -z "$chroot_disabled"; then - if ! diff -q "$testdir"/stderr.remoteice "$testdir"/stderr; then - echo "Stderr mismatch ($"testdir"/stderr.remoteice)" + if test -z "$nostderrcheck"; then + if ! diff "$testdir"/stderr.localice "$testdir"/stderr >/dev/null; then + echo "Stderr mismatch ($testdir/stderr.localice)" + echo ================ + diff -u "$testdir"/stderr "$testdir"/stderr.localice + echo ================ stop_ice 0 abort_tests fi + if test -z "$chroot_disabled"; then + skipstderrcheck= + if test -n "$unusedmacrohack" -a -n "$using_gcc"; then + # gcc -Wunused-macro gives different location for the error depending on whether -E is used or not + if ! diff "$testdir"/stderr.remoteice "$testdir"/stderr >/dev/null; then + if diff "$testdir"/stderr.remoteice unusedmacro1.txt >/dev/null; then + skipstderrcheck=1 + fi + if diff "$testdir"/stderr.remoteice unusedmacro2.txt >/dev/null; then + skipstderrcheck=1 + fi + fi + fi + if test -z "$skipstderrcheck"; then + if ! diff "$testdir"/stderr.remoteice "$testdir"/stderr >/dev/null; then + echo "Stderr mismatch ($testdir/stderr.remoteice)" + echo ================ + diff -u "$testdir"/stderr "$testdir"/stderr.remoteice + echo ================ + stop_ice 0 + abort_tests + fi + fi + fi fi local remove_offset_number="s/<[A-Fa-f0-9]*>/<>/g" @@ -416,49 +610,128 @@ run_ice() local remove_debug_pubnames="/^\s*Offset\s*Name/,/^\s*$/s/\s*[A-Fa-f0-9]*\s*//" local remove_size_of_area="s/\(Size of area in.*section:\)\s*[0-9]*/\1/g" if test -n "$output"; then - readelf -wlLiaprmfFoRt "$output" | sed -e "$remove_debug_info" \ - -e "$remove_offset_number" \ - -e "$remove_debug_pubnames" \ - -e "$remove_size_of_area" > "$output".readelf.txt || cp "$output" "$output".readelf.txt - readelf -wlLiaprmfFoRt "$output".localice | sed -e "$remove_debug_info" \ - -e "$remove_offset_number" \ - -e "$remove_debug_pubnames" \ - -e "$remove_size_of_area" > "$output".local.readelf.txt || cp "$output" "$output".local.readelf.txt - if ! diff -q "$output".local.readelf.txt "$output".readelf.txt; then - echo "Output mismatch ($output.localice)" - stop_ice 0 - abort_tests - fi - if test -z "$chroot_disabled"; then - readelf -wlLiaprmfFoRt "$output".remoteice | sed -e "$remove_debug_info" \ + if file "$output" | grep ELF >/dev/null; then + readelf -wlLiaprmfFoRt "$output" | sed -e "$remove_debug_info" \ + -e "$remove_offset_number" \ + -e "$remove_debug_pubnames" \ + -e "$remove_size_of_area" > "$output".readelf.txt || cp "$output" "$output".readelf.txt + readelf -wlLiaprmfFoRt "$output".localice | sed -e "$remove_debug_info" \ -e "$remove_offset_number" \ -e "$remove_debug_pubnames" \ - -e "$remove_size_of_area" > "$output".remote.readelf.txt || cp "$output" "$output".remote.readelf.txt - if ! diff -q "$output".remote.readelf.txt "$output".readelf.txt; then - echo "Output mismatch ($output.remoteice)" + -e "$remove_size_of_area" > "$output".local.readelf.txt || cp "$output" "$output".local.readelf.txt + if ! 
diff "$output".local.readelf.txt "$output".readelf.txt >/dev/null; then + echo "Output mismatch ($output.localice)" + echo ================ + diff -u "$output".readelf.txt "$output".local.readelf.txt + echo ================ + stop_ice 0 + abort_tests + fi + if test -z "$chroot_disabled"; then + readelf -wlLiaprmfFoRt "$output".remoteice | sed -e "$remove_debug_info" \ + -e "$remove_offset_number" \ + -e "$remove_debug_pubnames" \ + -e "$remove_size_of_area" > "$output".remote.readelf.txt || cp "$output" "$output".remote.readelf.txt + if ! diff "$output".remote.readelf.txt "$output".readelf.txt >/dev/null; then + echo "Output mismatch ($output.remoteice)" + echo ================ + diff -u "$output".readelf.txt "$output".remote.readelf.txt + echo ================ + stop_ice 0 + abort_tests + fi + fi + elif echo "$output" | grep -q '\.gch$'; then + # PCH file, no idea how to check they are the same if they are not 100% identical + # Make silent. + true + elif file "$output" | grep Mach >/dev/null; then + # No idea how to check they are the same if they are not 100% identical + if ! diff "$output".localice "$output" >/dev/null; then + echo "Output mismatch ($output.localice), Mach object files, not knowing how to verify" + fi + if test -z "$chroot_disabled"; then + if ! diff "$output".remoteice "$output" >/dev/null; then + echo "Output mismatch ($output.remoteice), Mach object files, not knowing how to verify" + fi + fi + elif echo "$output" | grep -q -e '\.o$' -e '\.dwo$'; then + # possibly cygwin .o file, no idea how to check they are the same if they are not 100% identical + if ! diff "$output".localice "$output" >/dev/null; then + echo "Output mismatch ($output.localice), assuming Cygwin object files, not knowing how to verify" + fi + if test -z "$chroot_disabled"; then + if ! diff "$output".remoteice "$output" >/dev/null; then + echo "Output mismatch ($output.remoteice), assuming Cygwin object files, not knowing how to verify" + fi + fi + else + if ! diff "$output".localice "$output" >/dev/null; then + echo "Output mismatch ($output.localice)" + echo ================ + diff -u "$output" "$output".localice + echo ================ stop_ice 0 abort_tests fi + if test -z "$chroot_disabled"; then + if ! diff "$output".remoteice "$output" >/dev/null; then + echo "Output mismatch ($output.remoteice)" + echo ================ + diff -u "$output" "$output".remoteice + echo ================ + stop_ice 0 + abort_tests + fi + fi fi fi if test -n "$split_dwarf"; then - readelf -wlLiaprmfFoRt "$split_dwarf" | \ - sed -e "$remove_debug_info" -e "$remove_offset_number" > "$split_dwarf".readelf.txt || cp "$split_dwarf" "$split_dwarf".readelf.txt - readelf -wlLiaprmfFoRt "$split_dwarf".localice | \ - sed -e $remove_debug_info -e "$remove_offset_number" > "$split_dwarf".local.readelf.txt || cp "$split_dwarf" "$split_dwarf".local.readelf.txt - if ! diff -q "$split_dwarf".local.readelf.txt "$split_dwarf".readelf.txt; then - echo "Output DWO mismatch ($split_dwarf.localice)" - stop_ice 0 - abort_tests - fi - if test -z "$chroot_disabled"; then - readelf -wlLiaprmfFoRt "$split_dwarf".remoteice | \ - sed -e "$remove_debug_info" -e "$remove_offset_number" > "$split_dwarf".remote.readelf.txt || cp "$split_dwarf" "$split_dwarf".remote.readelf.txt - if ! 
diff -q "$split_dwarf".remote.readelf.txt "$split_dwarf".readelf.txt; then - echo "Output DWO mismatch ($split_dwarf.remoteice)" + if file "$output" | grep ELF >/dev/null; then + readelf -wlLiaprmfFoRt "$split_dwarf" | \ + sed -e "$remove_debug_info" -e "$remove_offset_number" > "$split_dwarf".readelf.txt || cp "$split_dwarf" "$split_dwarf".readelf.txt + readelf -wlLiaprmfFoRt "$split_dwarf".localice | \ + sed -e $remove_debug_info -e "$remove_offset_number" > "$split_dwarf".local.readelf.txt || cp "$split_dwarf" "$split_dwarf".local.readelf.txt + if ! diff "$split_dwarf".local.readelf.txt "$split_dwarf".readelf.txt >/dev/null; then + echo "Output DWO mismatch ($split_dwarf.localice)" + echo ==================== + diff -u "$split_dwarf".readelf.txt "$split_dwarf".local.readelf.txt + echo ==================== stop_ice 0 abort_tests fi + if test -z "$chroot_disabled"; then + readelf -wlLiaprmfFoRt "$split_dwarf".remoteice | \ + sed -e "$remove_debug_info" -e "$remove_offset_number" > "$split_dwarf".remote.readelf.txt || cp "$split_dwarf" "$split_dwarf".remote.readelf.txt + if ! diff "$split_dwarf".remote.readelf.txt "$split_dwarf".readelf.txt >/dev/null; then + echo "Output DWO mismatch ($split_dwarf.remoteice)" + echo ==================== + diff -u "$split_dwarf".readelf.txt "$split_dwarf".remote.readelf.txt + echo ==================== + stop_ice 0 + abort_tests + fi + fi + elif file "$output" | grep Mach >/dev/null; then + # No idea how to check they are the same if they are not 100% identical + if ! diff "$split_dwarf".localice "$split_dwarf" >/dev/null; then + echo "Output mismatch ($split_dwarf.localice), Mach object files, not knowing how to verify" + fi + if test -z "$chroot_disabled"; then + if ! diff "$split_dwarf".remoteice "$split_dwarf" >/dev/null; then + echo "Output mismatch ($split_dwarf.remoteice), Mach object files, not knowing how to verify" + fi + fi + else + # possibly cygwin .o file, no idea how to check they are the same if they are not 100% identical + if ! diff "$split_dwarf".localice "$split_dwarf" >/dev/null; then + echo "Output mismatch ($split_dwarf.localice), assuming Cygwin object files, not knowing how to verify" + fi + if test -z "$chroot_disabled"; then + if ! diff "$split_dwarf".remoteice "$split_dwarf" >/dev/null; then + echo "Output mismatch ($split_dwarf.remoteice), assuming Cygwin object files, not knowing how to verify" + fi + fi fi fi if test $localice_exit -ne 0; then @@ -469,7 +742,16 @@ run_ice() echo fi if test -n "$output"; then - rm -f "$output" "$output".localice "$output".remoteice "$output".readelf.txt "$output".local.readelf.txt "$output".remote.readelf.txt + if test -n "$keepoutput"; then + if test -z "$chroot_disabled"; then + mv "$output".remoteice "$output" + else + mv "$output".localice "$output" + fi + else + rm -f "output" + fi + rm -f "$output".localice "$output".remoteice "$output".readelf.txt "$output".local.readelf.txt "$output".remote.readelf.txt fi if test -n "$split_dwarf"; then rm -f "$split_dwarf" "$split_dwarf".localice "$split_dwarf".remoteice "$split_dwarf".readelf.txt "$split_dwarf".local.readelf.txt "$split_dwarf".remote.readelf.txt @@ -482,18 +764,13 @@ make_test() # make test - actually try something somewhat realistic. Since each node is set up to serve # only 2 jobs max, at least some of the 10 jobs should be built remotely. 
- # All the test compiles are small, and should produce small .o files, which will make the scheduler - # stats for those jobs, so it will not actually have any statistics about nodes (make_test is intentionally - # run early to ensure this). So run the test once, first time without stats, second time with stats - # (make.h has large data to ensure the first make_test run will finally produce statistics). - run_number=$1 - - echo Running make test $run_number. - reset_logs remote "make test $run_number" + echo Running make test. + reset_logs "" "make test" make -f Makefile.test OUTDIR="$testdir" clean -s - PATH="$prefix"/libexec/icecc/bin:/usr/local/bin:/usr/bin:/bin ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log make -f Makefile.test OUTDIR="$testdir" -j10 -s 2>>"$testdir"/stderr.log + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ + make -f Makefile.test OUTDIR="$testdir" CXX="${icecc} $TESTCXX" -j10 -s 2>>"$testdir"/stderr.log if test $? -ne 0 -o ! -x "$testdir"/maketest; then - echo Make test $run_number failed. + echo Make test failed. stop_ice 0 abort_tests fi @@ -502,57 +779,66 @@ make_test() check_log_message icecc "Have to use host 127.0.0.1:10246" check_log_message icecc "Have to use host 127.0.0.1:10247" check_log_message_count icecc 1 "" - if test $run_number -eq 1; then - check_log_message scheduler "no job stats - returning randomly selected" - else - check_log_error scheduler "no job stats - returning randomly selected" - fi - echo Make test $run_number successful. + check_log_message remoteice1 "Remote compilation completed with exit code 0" + check_log_error remoteice1 "Remote compilation aborted with exit code" + check_log_error remoteice1 "Remote compilation exited with exit code $expected_exit" + check_log_message remoteice2 "Remote compilation completed with exit code 0" + check_log_error remoteice2 "Remote compilation aborted with exit code" + check_log_error remoteice2 "Remote compilation exited with exit code $expected_exit" + echo Make test successful. echo make -f Makefile.test OUTDIR="$testdir" clean -s } # 1st argument, if set, means we run without scheduler -icerun_test() +icerun_serialize_test() { # test that icerun really serializes jobs and only up to 2 (max jobs of the local daemon) are run at any time noscheduler= test -n "$1" && noscheduler=" (no scheduler)" echo "Running icerun${noscheduler} test." - reset_logs local "icerun${noscheduler} test" + reset_logs "" "icerun${noscheduler} test" # remove . 
from PATH if set save_path=$PATH - export PATH=`echo $PATH | sed 's/:.:/:/' | sed 's/^.://' | sed 's/:.$//'` + export PATH=$(echo $PATH | sed 's/:.:/:/' | sed 's/^.://' | sed 's/:.$//') rm -rf "$testdir"/icerun mkdir -p "$testdir"/icerun - for i in `seq 1 10`; do + if test -n "$valgrind"; then + export ICERUN_TEST_VALGRIND=1 + fi + for i in $(seq 1 10); do path=$PATH if test $i -eq 1; then # check icerun with absolute path - testbin=`pwd`/icerun-test.sh + testbin=$(pwd)/icerun-test.sh elif test $i -eq 2; then # check with relative path testbin=../tests/icerun-test.sh elif test $i -eq 3; then # test with PATH testbin=icerun-test.sh - path=`pwd`:$PATH + path=$(pwd):$PATH else testbin=./icerun-test.sh fi - PATH=$path ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "$prefix"/bin/icerun $testbin "$testdir"/icerun $i & + PATH=$path ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ + $valgrind "${icerun}" $testbin "$testdir"/icerun $i & done + unset ICERUN_TEST_VALGRIND timeout=100 + if test -n "$valgrind"; then + timeout=500 + fi seen2= while true; do - runcount=`ls -1 "$testdir"/icerun/running* 2>/dev/null | wc -l` + runcount=$(ls -1 "$testdir"/icerun/running* 2>/dev/null | wc -l) if test $runcount -gt 2; then echo "Icerun${noscheduler} test failed, more than expected 2 processes running." stop_ice 0 abort_tests fi test $runcount -eq 2 && seen2=1 - donecount=`ls -1 "$testdir"/icerun/done* 2>/dev/null | wc -l` + donecount=$(ls -1 "$testdir"/icerun/done* 2>/dev/null | wc -l) if test $donecount -eq 10; then break fi @@ -571,126 +857,301 @@ icerun_test() abort_tests fi - # check that plain 'icerun-test.sh' doesn't work for the current directory (i.e. ./ must be required just like with normal execution) - ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "$prefix"/bin/icerun icerun-test.sh "$testdir"/icerun 0 & - icerun_test_pid=$! - timeout=20 - while true; do - if ! kill -0 $icerun_test_pid 2>/dev/null; then - break - fi - sleep 0.1 - timeout=$((timeout-1)) - if test $timeout -eq 0; then - echo "Icerun${noscheduler} test timed out." - stop_ice 0 - abort_tests - fi - done - flush_logs check_logs_for_generic_errors + check_log_message_count icecc 10 "" check_log_error icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "building myself, but telling localhost" check_log_error icecc "local build forced" - check_log_message_count icecc 11 "" - check_log_message_count icecc 1 "couldn't find any" - check_log_message_count icecc 1 "could not find icerun-test.sh in PATH." echo "Icerun${noscheduler} test successful." echo rm -r "$testdir"/icerun export PATH=$save_path } -# Check that icecc --build-native works. -buildnativetest() +icerun_nopath_test() { - echo Running icecc --build-native test. - local tgz=$(PATH="$prefix"/bin:/bin:/usr/bin icecc --build-native 2>&1 | \ - grep "^creating .*\.tar\.gz$" | sed -e "s/^creating //") - if test $? -ne 0; then - echo icecc --build-native test failed. - abort_tests - fi - rm -f $tgz - echo icecc --build-native test successful. + reset_logs "" "icerun nopath test" + echo "Running icerun nopath test." + # check that plain 'icerun-test.sh' doesn't work for the current directory (i.e. 
./ must be required just like with normal execution) + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ + $valgrind "${icerun}" icerun-test.sh + check_log_error icecc "invoking:" + check_log_message icecc "couldn't find any" + check_log_message icecc "could not find icerun-test.sh in PATH." + echo "Icerun nopath test successful." echo } -# Check that icecc recursively invoking itself is detected. -recursive_test() +icerun_nocompile_test() { - echo Running recursive check test. - reset_logs local "recursive check" - - PATH="$prefix"/lib/icecc/bin:"$prefix"/bin:/usr/local/bin:/usr/bin:/bin ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log "${icecc}" ./recursive_g++ -Wall -c plain.c -o plain.o 2>>"$testdir"/stderr.log - if test $? -ne 111; then - echo Recursive check test failed. + # check that 'icerun gcc' still only runs the command without trying a remote compile + reset_logs "" "icerun${noscheduler} nocompile test" + echo "Running icerun nocompile test." + rm -rf -- "$testdir"/fakegcc + mkdir -p "$testdir"/fakegcc + echo '#! /bin/sh' > "$testdir"/fakegcc/gcc + echo 'echo "$@" >' "$testdir"/fakegcc/output >> "$testdir"/fakegcc/gcc + echo 'exit 44' >> "$testdir"/fakegcc/gcc + chmod +x "$testdir"/fakegcc/gcc + args="-Wall a.c b.c -c -s" + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ + PATH="$testdir"/fakegcc:$PATH $valgrind "${icerun}" gcc $args + if test $? -ne 44; then + echo Error, icerun gcc failed. stop_ice 0 abort_tests fi - flush_logs - check_logs_for_generic_errors - check_log_message icecc "icecream seems to have invoked itself recursively!" - echo Recursive check test successful. + check_log_message icecc "invoking: $testdir/fakegcc/gcc $args\$" + rm -rf -- "$testdir"/fakegcc + echo "Icerun nocompile test successful." echo } -# Check that transfering Clang plugin(s) works. While at it, also test ICECC_EXTRAFILES. -clangplugintest() +symlink_wrapper_test() { - echo Running Clang plugin test. - reset_logs remote "clang plugin" + cxxwrapper="$wrapperdir/$(basename $TESTCXX)" + if ! test -e "$cxxwrapper"; then + echo Cannot find wrapper symlink for $TESTCXX, symlink wrapper test skipped. + echo + skipped_tests="$skipped_tests symlink_wrapper" + return + fi + reset_logs "local" "symlink wrapper test" + echo "Running symlink wrapper test." + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ + PATH=$(dirname $TESTCXX):$PATH ICECC_PREFERRED_HOST=localice $valgrind "$cxxwrapper" -Wall -c plain.cpp + if test $? -ne 0; then + echo Error, local symlink wrapper test failed. + stop_ice 0 + abort_tests + fi + flush_logs + check_logs_for_generic_errors + check_log_error icecc "" + check_log_error icecc "Have to use host 127.0.0.1:10246" + check_log_error icecc "Have to use host 127.0.0.1:10247" + check_log_message icecc "building myself, but telling localhost" + check_log_message icecc "invoking: $TESTCXX -Wall" - # TODO This should be able to also handle the clangpluginextra.txt argument without the absolute path. 
- ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log ICECC_EXTRAFILES=clangpluginextra.txt $valgrind "${icecc}" \ - $CLANGXX -Wall -c -Xclang -load -Xclang "$builddir"/clangplugin.so -Xclang -add-plugin -Xclang icecreamtest -Xclang -plugin-arg-icecreamtest -Xclang `realpath -s clangpluginextra.txt` clangplugintest.cpp -o "$testdir"/clangplugintest.o 2>>"$testdir"/stderr.log + mark_logs "remote" "symlink wrapper test" + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ + PATH=$(dirname $TESTCXX):$PATH ICECC_PREFERRED_HOST=remoteice1 $valgrind "$cxxwrapper" -Wall -c plain.cpp if test $? -ne 0; then - echo Clang plugin test failed. + echo Error, remote symlink wrapper test failed. stop_ice 0 abort_tests fi flush_logs check_logs_for_generic_errors + check_log_error icecc "" check_log_message icecc "Have to use host 127.0.0.1:10246" check_log_error icecc "Have to use host 127.0.0.1:10247" check_log_error icecc "building myself, but telling localhost" - check_log_error icecc "local build forced" - check_log_error icecc "" - check_log_message_count stderr 1 "clangplugintest.cpp:3:5: warning: Icecream plugin found return false" - check_log_message_count stderr 1 "warning: Extra file check successful" - check_log_error stderr "Extra file open error" - check_log_error stderr "Incorrect number of arguments" - check_log_error stderr "File contents do not match" - echo Clang plugin test successful. + check_log_message icecc "preparing source to send: $TESTCXX -Wall" + + echo "Symlink wrapper test successful." echo - rm "$testdir"/clangplugintest.o } -# Both clang and gcc4.8+ produce different debuginfo depending on whether the source file is -# given on the command line or using stdin (which is how icecream does it), so do not compare output. -# But check the functionality is identical to local build. -# 1st argument is compile command, without -o argument. -# 2nd argument is first line of debug at which to start comparing. -debug_test() +# Check that icecc --build-native works. +buildnativetest() { - # debug tests fail when the daemon is not running in the install directory - # Sanitizers will not give good output on error as a result - kill_daemon localice - ICECC_TEST_SOCKET="$testdir"/socket-localice $valgrind "${prefix}/sbin/iceccd" -s localhost:8767 -b "$testdir"/envs-localice -l "$testdir"/localice.log -N localice -v -v -v --no-remote -m 2 & - localice_pid=$! - echo ${localice_pid} > "$testdir"/${localice}.pid - wait_for_proc_sleep 10 $localice_pid + echo Running icecc --build-native test. + reset_logs "local" "Build native" + test_build_native_helper $TESTCC 1 + if test $? -ne 0; then + echo Icecc --build-native test failed. + cat "$testdir"/icecc-build-native-output + stop_ice 0 + abort_tests + fi + echo Icecc --build-native test successful. + echo +} + +buildnativewithsymlinktest() +{ + reset_logs local "Native environment with symlink" + echo Testing native environment with a compiler symlink. + rm -rf -- "$testdir"/wrappers + mkdir -p "$testdir"/wrappers + ln -s $(command -v $TESTCC) "$testdir"/wrappers/ + ln -s $(command -v $TESTCXX) "$testdir"/wrappers/ + test_build_native_helper "$testdir"/wrappers/$(basename $TESTCC) 0 + if test $? -ne 0; then + echo Testing native environment with a compiler symlink failed. 
+ cat "$testdir"/icecc-build-native-output + stop_ice 0 + abort_tests + fi + rm -rf -- "$testdir"/wrappers + echo Testing native environment with a compiler symlink successful. + echo +} + +buildnativewithwrappertest() +{ + reset_logs local "Native environment with a compiler wrapper" + echo Testing native environment with a compiler wrapper. + rm -rf -- "$testdir"/wrappers + mkdir -p "$testdir"/wrappers + echo '#! /bin/sh' > "$testdir"/wrappers/$(basename $TESTCC) + echo exec $TESTCC '"$@"' >> "$testdir"/wrappers/$(basename $TESTCC) + echo '#! /bin/sh' > "$testdir"/wrappers/$(basename $TESTCXX) + echo exec $TESTCXX '"$@"' >> "$testdir"/wrappers/$(basename $TESTCXX) + chmod +x "$testdir"/wrappers/$(basename $TESTCC) "$testdir"/wrappers/$(basename $TESTCXX) + test_build_native_helper "$testdir"/wrappers/$(basename $TESTCC) 0 + if test $? -ne 0; then + echo Testing native environment with a compiler symlink failed. + cat "$testdir"/icecc-build-native-output + stop_ice 0 + abort_tests + fi + rm -rf -- "$testdir"/wrappers + echo Testing native environment with a compiler symlink successful. + echo +} +test_build_native_helper() +{ + compiler=$1 + add_skip=$2 + pushd "$testdir" >/dev/null + ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log ${icecc} --build-native $compiler > "$testdir"/icecc-build-native-output + if test $? -ne 0; then + return 1 + fi + local tgz=$(grep "^creating .*\.tar\.gz$" "$testdir"/icecc-build-native-output | sed -e "s/^creating //") + if test -z "$tgz"; then + return 2 + fi + sudo -n -- ${icecc_test_env} -q "$tgz" + retcode=$? + if test $retcode -eq 1; then + echo Cannot verify environment, use sudo, skipping test. + if test "$add_skip" = "1"; then + skipped_tests="$skipped_tests $testtype" + fi + elif test $retcode -ne 0; then + echo icecc_test_env failed to validate the environment + return 3 + fi + rm -f $tgz "$testdir"/icecc-build-native-output + popd >/dev/null + return 0 +} + +# Check that icecc recursively invoking itself is detected. +recursive_test() +{ + echo Running recursive check test. + reset_logs "" "recursive check" + + recursive_tester= + if test -n "$using_clang"; then + recursive_tester=./recursive_clang++ + elif test -n "$using_gcc"; then + recursive_tester=./recursive_g++ + fi + PATH="$prefix"/lib/icecc/bin:"$prefix"/bin:/usr/local/bin:/usr/bin:/bin ICECC_TEST_SOCKET="$testdir"/socket-localice \ + ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log "${icecc}" ./${recursive_tester} -Wall -c plain.c -o plain.o 2>>"$testdir"/stderr.log + if test $? -ne 111; then + echo Recursive check test failed. + stop_ice 0 + abort_tests + fi + flush_logs + check_logs_for_generic_errors + check_log_message icecc "icecream seems to have invoked itself recursively!" + echo Recursive check test successful. + echo + + # But a recursive invocations in the style of icerun->icecc is allowed. + echo Running recursive icerun check test. + reset_logs "" "recursive icerun check" + + PATH="$prefix"/lib/icecc/bin:"$prefix"/bin:/usr/local/bin:/usr/bin:/bin ICECC_TEST_SOCKET="$testdir"/socket-localice \ + ICECC_TEST_REMOTEBUILD=1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log "${icerun}" ${icecc} $TESTCC -Wall -c plain.c -o "$testdir"/plain.o 2>>"$testdir"/stderr.log + if test $? -ne 0; then + echo Recursive icerun check test failed. + stop_ice 0 + abort_tests + fi + rm -f "$testdir"/plain.o + flush_logs + check_logs_for_generic_errors + check_log_error icecc "icecream seems to have invoked itself recursively!" 
+    check_log_message_count icecc 1 "recursive invocation from icerun"
+    echo Recursive icerun check test successful.
+    echo
+}
+
+# Check that transferring Clang plugin(s) works. While at it, also test ICECC_EXTRAFILES.
+clangplugintest()
+{
+    echo Running Clang plugin test.
+    reset_logs "" "clang plugin"
+
+    if test -z "$LLVM_CONFIG"; then
+        LLVM_CONFIG=llvm-config
+    fi
+    clangcxxflags=$($LLVM_CONFIG --cxxflags 2>>"$testdir"/stderr.log)
+    if test $? -ne 0; then
+        echo Cannot find Clang development headers, clang plugin test skipped.
+        echo
+        skipped_tests="$skipped_tests clangplugin"
+        return
+    fi
+    echo Clang plugin compile flags: $clangcxxflags
+    $TESTCXX -shared -fPIC -g -o "$testdir"/clangplugin.so clangplugin.cpp $clangcxxflags 2>>"$testdir"/stderr.log
+    if test $? -ne 0; then
+        echo Failed to compile clang plugin, clang plugin test skipped.
+        echo
+        skipped_tests="$skipped_tests clangplugin"
+        return
+    fi
+
+    # TODO This should be able to also handle the clangpluginextra.txt argument without the absolute path.
+    export ICECC_EXTRAFILES=clangpluginextra.txt
+    run_ice "$testdir/clangplugintest.o" "remote" 0 $TESTCXX -Wall -c -Xclang -load -Xclang "$testdir"/clangplugin.so \
+        -Xclang -add-plugin -Xclang icecreamtest -Xclang -plugin-arg-icecreamtest -Xclang $(realpath -s clangpluginextra.txt) \
+        clangplugintest.cpp -o "$testdir"/clangplugintest.o
+    unset ICECC_EXTRAFILES
+    also_remote=
+    if test -z "$chroot_disabled"; then
+        also_remote=".remoteice"
+    fi
+    for type in "" ".localice" $also_remote; do
+        check_section_log_message_count stderr${type} 1 "clangplugintest.cpp:3:5: warning: Icecream plugin found return false"
+        check_section_log_message_count stderr${type} 1 "warning: Extra file check successful"
+        check_section_log_error stderr${type} "Extra file open error"
+        check_section_log_error stderr${type} "Incorrect number of arguments"
+        check_section_log_error stderr${type} "File contents do not match"
+    done
+    echo Clang plugin test successful.
+    echo
+}
+
+# Both clang and gcc4.8+ produce different debuginfo depending on whether the source file is
+# given on the command line or using stdin (which is how icecream does it), so do not compare output.
+# But check that the functionality is identical to a local build.
+# 1st argument is compile command, without -o argument.
+# 2nd argument is first line of debug at which to start comparing.
+debug_test()
+{
     compiler="$1"
     args="$2"
     cmd="$1 $2"
     debugstart="$3"
     echo "Running debug test ($cmd)."
-    reset_logs remote "debug test ($cmd)"
+    reset_logs "" "debug test ($cmd)"
-    ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" \
+    preferred=remoteice1
+    if test -n "$chroot_disabled"; then
+        preferred=localice
+    fi
+    ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=$preferred ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" \
         $cmd -o "$testdir"/debug-remote.o 2>>"$testdir"/stderr.log
     if test $? -ne 0; then
         echo Debug test compile failed.
@@ -700,11 +1161,22 @@ debug_test() flush_logs check_logs_for_generic_errors - check_log_message icecc "Have to use host 127.0.0.1:10246" - check_log_error icecc "Have to use host 127.0.0.1:10247" - check_log_error icecc "building myself, but telling localhost" - check_log_error icecc "local build forced" - check_log_error icecc "" + if test -z "$chroot_disabled"; then + check_log_message icecc "Have to use host 127.0.0.1:10246" + check_log_error icecc "Have to use host 127.0.0.1:10247" + check_log_error icecc "building myself, but telling localhost" + check_log_error icecc "local build forced" + check_log_error icecc "" + check_log_message remoteice1 "Remote compilation completed with exit code 0" + check_log_error remoteice1 "Remote compilation aborted with exit code" + check_log_error remoteice1 "Remote compilation exited with exit code" + else + check_log_message icecc "building myself, but telling localhost" + check_log_error icecc "Have to use host 127.0.0.1:10246" + check_log_error icecc "Have to use host 127.0.0.1:10247" + check_log_error icecc "local build forced" + check_log_error icecc "" + fi $compiler -o "$testdir"/debug-remote "$testdir"/debug-remote.o if test $? -ne 0; then echo Linking in debug test failed. @@ -718,19 +1190,6 @@ debug_test() abort_tests fi - # gcc-4.8+ has -grecord-gcc-switches, which makes the .o differ because of the extra flags the daemon adds, - # this changes DW_AT_producer and also offsets - local remove_debug_info="s/\(Length\|DW_AT_\(GNU_dwo_\(id\|name\)\|comp_dir\|producer\|linkage_name\|name\)\).*/\1/g" - local remove_offset_number="s/<[A-Fa-f0-9]*>/<>/g" - local remove_size_of_area="s/\(Size of area in.*section:\)\s*[0-9]*/\1/g" - local remove_debug_pubnames="/^\s*Offset\s*Name/,/^\s*$/s/\s*[A-Fa-f0-9]*\s*//" - readelf -wlLiaprmfFoRt "$testdir"/debug-remote.o | sed -e 's/offset: 0x[0-9a-fA-F]*//g' \ - -e 's/[ ]*--param ggc-min-expand.*heapsize\=[0-9]\+//g' \ - -e "$remove_debug_info" \ - -e "$remove_offset_number" \ - -e "$remove_size_of_area" \ - -e "$remove_debug_pubnames" > "$testdir"/readelf-remote.txt - $cmd -o "$testdir"/debug-local.o 2>>"$testdir"/stderr.log if test $? -ne 0; then echo Debug test compile failed. @@ -749,67 +1208,85 @@ debug_test() stop_ice 0 abort_tests fi - readelf -wlLiaprmfFoRt "$testdir"/debug-local.o | sed -e 's/offset: 0x[0-9a-fA-F]*//g' \ - -e "$remove_debug_info" \ - -e "$remove_offset_number" \ - -e "$remove_size_of_area" \ - -e "$remove_debug_pubnames" > "$testdir"/readelf-local.txt - - if ! diff -q "$testdir"/debug-output-local.txt "$testdir"/debug-output-remote.txt ; then + if ! diff "$testdir"/debug-output-local.txt "$testdir"/debug-output-remote.txt >/dev/null; then echo Gdb output different. + echo ===================== + diff -u "$testdir"/debug-output-local.txt "$testdir"/debug-output-remote.txt + echo ===================== stop_ice 0 abort_tests fi - if ! diff -q "$testdir"/readelf-local.txt "$testdir"/readelf-remote.txt ; then - echo Readelf output different. 
- stop_ice 0 - abort_tests + + # gcc-4.8+ has -grecord-gcc-switches, which makes the .o differ because of the extra flags the daemon adds, + # this changes DW_AT_producer and also offsets + local remove_debug_info="s/\(Length\|DW_AT_\(GNU_dwo_\(id\|name\)\|comp_dir\|producer\|linkage_name\|name\)\).*/\1/g" + local remove_offset_number="s/<[A-Fa-f0-9]*>/<>/g" + local remove_size_of_area="s/\(Size of area in.*section:\)\s*[0-9]*/\1/g" + local remove_debug_pubnames="/^\s*Offset\s*Name/,/^\s*$/s/\s*[A-Fa-f0-9]*\s*//" + if file "$testdir"/debug-remote.o | grep ELF >/dev/null; then + readelf -wlLiaprmfFoRt "$testdir"/debug-remote.o | sed -e 's/offset: 0x[0-9a-fA-F]*//g' \ + -e 's/[ ]*--param ggc-min-expand.*heapsize\=[0-9]\+//g' \ + -e "$remove_debug_info" \ + -e "$remove_offset_number" \ + -e "$remove_size_of_area" \ + -e "$remove_debug_pubnames" > "$testdir"/readelf-remote.txt + readelf -wlLiaprmfFoRt "$testdir"/debug-local.o | sed -e 's/offset: 0x[0-9a-fA-F]*//g' \ + -e "$remove_debug_info" \ + -e "$remove_offset_number" \ + -e "$remove_size_of_area" \ + -e "$remove_debug_pubnames" > "$testdir"/readelf-local.txt + if ! diff "$testdir"/readelf-local.txt "$testdir"/readelf-remote.txt >/dev/null; then + echo Readelf output different. + echo ===================== + diff -u "$testdir"/readelf-local.txt "$testdir"/readelf-remote.txt + echo ===================== + stop_ice 0 + abort_tests + fi + elif file "$testdir"/debug-remote.o | grep Mach >/dev/null; then + # No idea how to check they are the same if they are not 100% identical + if ! diff "$testdir"/debug-local.o "$testdir"/debug-remote.o >/dev/null; then + echo "Output mismatch, Mach object files, not knowing how to verify" + fi + else + # possibly cygwin .o file, no idea how to check they are the same if they are not 100% identical + if ! diff "$testdir"/debug-local.o "$testdir"/debug-remote.o >/dev/null; then + echo "Output mismatch, assuming Cygwin object files, not knowing how to verify" + fi fi - rm "$testdir"/debug-remote.o "$testdir"/debug-local.o "$testdir"/debug-remote "$testdir"/debug-local "$testdir"/debug-*-*.txt "$testdir"/readelf-*.txt + rm -f "$testdir"/debug-remote.o "$testdir"/debug-local.o "$testdir"/debug-remote "$testdir"/debug-local "$testdir"/debug-*-*.txt "$testdir"/readelf-*.txt echo Debug test successful. echo - - # restart local daemon to the as built one - kill_daemon localice - start_iceccd localice --no-remote -m 2 } zero_local_jobs_test() { - echo Running zero_local_jobs test. + echo Running zero local jobs test. 
- reset_logs local "Running zero_local_jobs test" - reset_logs remote "Running zero_local_jobs test" + reset_logs "" "Running zero local jobs test" kill_daemon localice start_iceccd localice --no-remote -m 0 + wait_for_ice_startup_complete localice libdir="${testdir}/libs" rm -rf "${libdir}" mkdir "${libdir}" - reset_logs remote $GXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o" - echo Running: $GXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o" - ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $GXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o" + mark_logs remote $TESTCXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o" + echo Running: $TESTCXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o" + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $TESTCXX -Wall -Werror -c testfunc.cpp -o "${testdir}/testfunc.o" if [[ $? -ne 0 ]]; then echo "failed to build testfunc.o" - grep -q "AddressSanitizer failed to allocate" "$testdir"/iceccdstderr_remoteice1.log - if [[ $? ]]; then - echo "address sanitizer broke, skipping test" - skipped_tests="$skipped_tests zero_local_jobs_test" - reset_logs local "skipping zero_local_jobs_test" - start_iceccd localice --no-remote -m 2 - return 0 - fi stop_ice 0 abort_tests fi - reset_logs remote $GXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o" - echo Running: $GXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o" - ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice2 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $GXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o" + mark_logs remote $TESTCXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o" + echo Running: $TESTCXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o" + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice2 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $TESTCXX -Wall -Werror -c testmainfunc.cpp -o "${testdir}/testmainfunc.o" if test $? -ne 0; then echo "Error, failed to compile testfunc.cpp" stop_ice 0 @@ -829,9 +1306,9 @@ zero_local_jobs_test() abort_tests fi - reset_logs local $GXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp" - echo Running: $GXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp" - ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=localice ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $GXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp" 2>"$testdir"/stderr.remoteice + mark_logs local $TESTCXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp" + echo Running: $TESTCXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp" + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=localice ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log $valgrind "${icecc}" $TESTCXX -Wall -Werror "-L${libdir}" "-ltestlib1" "-ltestlib2" -o "${testdir}/linkedapp" 2>>"$testdir"/stderr.log if test $? 
-ne 0; then echo "Error, failed to link testlib1 and testlib2 into linkedapp" stop_ice 0 @@ -849,66 +1326,255 @@ zero_local_jobs_test() kill_daemon localice start_iceccd localice --no-remote -m 2 + wait_for_ice_startup_complete localice - echo zero_local_jobs test successful. + echo Zero local jobs test successful. echo } +ccache_test() +{ + if ! command -v ccache >/dev/null; then + echo Could not find ccache, ccache tests skipped. + echo + skipped_tests="$skipped_tests ccache" + return + fi + reset_logs "verify" "Testing ccache error redirect" + echo Testing ccache error redirect. + # First check that everything actually works (the test itself doesn't have icecc debug enabled and uses only stderr, because of ccache). + rm -rf "$testdir/ccache" + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ + CCACHE_PREFIX=${icecc} CCACHE_DIR="$testdir"/ccache ICECC_VERSION=testbrokenenv ccache $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o 2>>"$testdir"/stderr.log + check_log_message icecc "ICECC_VERSION has to point to an existing file to be installed testbrokenenv" + # Second run, will get cached result, so there's no icecc error in ccache's cached stderr + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug ICECC_LOGFILE="$testdir"/icecc.log \ + CCACHE_PREFIX=${icecc} CCACHE_DIR="$testdir"/ccache ICECC_VERSION=testbrokenenv ccache $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o 2>>"$testdir"/stderr.log + check_log_message_count icecc 1 "ICECC_VERSION has to point to an existing file to be installed testbrokenenv" + + # Now run it again, this time without icecc debug redirected, so that ccache has to handle icecc's stderr. + reset_logs "cache" "Testing ccache error redirect" + rm -rf "$testdir/ccache" + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 ICECC_DEBUG=debug \ + CCACHE_PREFIX=${icecc} CCACHE_DIR="$testdir"/ccache ICECC_VERSION=testbrokenenv ccache $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o 2>>"$testdir"/stderr.log + if cat_log_last_mark stderr | grep -q "UNCACHED_ERR_FD provides an invalid file descriptor"; then + echo UNCACHED_ERR_FD provided by ccache is invalid, skipping test. + echo + skipped_tests="$skipped_tests ccache" + return + fi + if ! cat_log_last_mark stderr | grep -q "ICECC_VERSION has to point to an existing file to be installed testbrokenenv"; then + # If ccache's UNCACHED_ERR_FD handling is broken, the fd number may match an unrelated open fd, in which case the log message just disappears. + echo Missing icecc stderr output from ccache, assuming broken ccache, skipping test. + echo + skipped_tests="$skipped_tests ccache" + return + fi + # second run, will get cached result, so there's no icecc error in ccache's cached stderr + reset_logs "test" "Testing ccache error redirect" + ICECC_TEST_SOCKET="$testdir"/socket-localice ICECC_TEST_REMOTEBUILD=1 ICECC_PREFERRED_HOST=remoteice1 \ + CCACHE_PREFIX=${icecc} CCACHE_DIR="$testdir"/ccache ICECC_VERSION=testbrokenenv ccache $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o 2>>"$testdir"/stderr.log + check_section_log_error stderr "ICECC_VERSION has to point to an existing file to be installed testbrokenenv" + echo Testing ccache error redirect successful. + echo +} + +# All log files that are used by tests. Done here to keep the list in just one place. 
+daemonlogs="scheduler scheduler2 localice remoteice1 remoteice2" +otherlogs="icecc stderr stderr.localice stderr.remoteice" +alltestlogs="$daemonlogs $otherlogs" + +# Call this at the start of a complete test (e.g. testing a feature). If a test fails, logs before this point will not be dumped. reset_logs() { type="$1" shift - # in case icecc.log or stderr.log don't exit, avoid error message - touch "$testdir"/icecc.log "$testdir"/stderr.log - for log in scheduler localice remoteice1 remoteice2 icecc stderr; do - # save (append) previous log - cat "$testdir"/${log}.log >> "$testdir"/${log}_all.log - # and start a new one - echo ============== > "$testdir"/${log}.log - echo "Test ($type): $@" >> "$testdir"/${log}.log - echo ============== >> "$testdir"/${log}.log - if test "$log" != icecc -a "$log" != stderr; then - pid=${log}_pid - if test -n "${!pid}"; then - kill -HUP ${!pid} - fi - fi - done + last_reset_log_mark=$flush_log_mark + mark_logs $type "$@" } -finish_logs() +# Call this at the start of a sub-test (e.g. remote vs local build). Functions such as check_log_message will not check before the mark. +mark_logs() { - for log in scheduler localice remoteice1 remoteice2 icecc stderr; do - cat "$testdir"/${log}.log >> "$testdir"/${log}_all.log - rm -f "$testdir"/${log}.log + type="$1" + shift + last_section_log_mark=$flush_log_mark + echo ================ > "$testdir"/log_header.txt + if test -n "$type"; then + echo "= Test ($type): $@" >> "$testdir"/log_header.txt + else + echo "= Test : $@" >> "$testdir"/log_header.txt + fi + echo ================ >> "$testdir"/log_header.txt + # Make daemons write the header. + flush_logs + manual="$otherlogs" + for daemon in $daemonlogs; do + pid=${daemon}_pid + if test -n "${!pid}"; then + kill -0 ${!pid} + if test $? -ne 0; then + manual="$manual $daemon" + fi + else + manual="$manual $daemon" + fi done + for log in $manual; do + cat "$testdir"/log_header.txt >> "$testdir"/${log}.log + done + rm "$testdir"/log_header.txt } flush_logs() { - for daemon in scheduler localice remoteice1 remoteice2; do + echo "=${flush_log_mark}=" > "$testdir"/flush_log_mark.txt + wait_for= + manual="$otherlogs" + for daemon in $daemonlogs; do pid=${daemon}_pid if test -n "${!pid}"; then kill -HUP ${!pid} + if test $? -eq 0; then + wait_for="$wait_for $daemon" + else + manual="$manual $daemon" + fi + else + manual="$manual $daemon" + fi + done + # wait for all daemons to log the mark in their log + while test -n "$wait_for"; do + ready=1 + for daemon in $wait_for; do + if ! grep -q "flush log mark: =${flush_log_mark}=" "$testdir"/${daemon}.log; then + ready= + fi + done + if test -n "$ready"; then + break fi done + for log in $manual; do + echo "flush log mark: =${flush_log_mark}=" >> "$testdir"/${log}.log + done + rm "$testdir"/flush_log_mark.txt + flush_log_mark=$((flush_log_mark + 1)) +} + +dump_logs() +{ + for log in $alltestlogs; do + # Skip logs that have only headers and flush marks + if cat_log_last_section ${log} | grep -q -v "^="; then + echo ------------------------------------------------ + echo "Log: ${log}" + cat_log_last_section ${log} + fi + done + valgrind_logs=$(ls "$testdir"/valgrind-*.log 2>/dev/null) + for log in $valgrind_logs; do + has_error= + if test -n "$valgrind_error_markers"; then + if grep -q ICEERRORBEGIN ${log}; then + has_error=1 + fi + else + # Let's guess that every error message has this. 
+ if grep -q '^==[0-9]*== at ' ${log}; then + has_error=1 + fi + fi + if test -n "$has_error"; then + echo ------------------------------------------------ + echo "Log: ${log}" | sed "s#${testdir}/##" + grep -v ICEERRORBEGIN ${log} | grep -v ICEERROREND + fi + done +} + +cat_log_last_mark() +{ + log="$1" + grep -A 100000 "flush log mark: =${last_section_log_mark}=" "$testdir"/${log}.log | grep -v "flush log mark: " +} + +cat_log_last_section() +{ + log="$1" + grep -A 100000 "flush log mark: =${last_reset_log_mark}=" "$testdir"/${log}.log | grep -v "flush log mark: " } +# Optional arguments, in this order: +# - stderrfix - the same as for run_ice check_logs_for_generic_errors() { + stderrfix= + if test "$1" = "stderrfix"; then + if test -n "$using_gcc"; then + stderrfix=1 + shift + fi + fi check_log_error scheduler "that job isn't handled by" check_log_error scheduler "the server isn't the same for job" check_log_error icecc "got exception " + check_log_error icecc "found another non option on command line. Two input files" + for log in localice remoteice1 remoteice2; do + check_log_error $log "Ignoring bogus version" + check_log_error $log "scheduler closed connection" + done + for log in scheduler icecc localice remoteice1 remoteice2; do + check_log_error $log "internal error" + if test -n "$stderrfix"; then + # If the client finds out it needs to do a local rebuild because of the need to fix + # stderr, it will simply close the connection to the remote daemon, so the remote + # daemon may get broken pipe when trying to write the object file. That's harmless. + if test "$log" != remoteice1 -a "$log" != "remoteice2"; then + check_log_error $log "setting error state for channel" + fi + fi + done # consider all non-fatal errors such as running out of memory on the remote # still as problems, except for: # 102 - -fdiagnostics-show-caret forced local build (gcc-4.8+) - check_log_error_except icecc "local build forced" "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" + if test -n "$using_gcc"; then + check_log_error_except icecc "local build forced" "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" + else + check_log_error icecc "local build forced" + fi + has_valgrind_error= + if test -n "$valgrind_error_markers"; then + if grep -q "ICEERRORBEGIN" "$testdir"/valgrind-*.log 2>/dev/null; then + has_valgrind_error=1 + fi + else + if grep -q '^==[0-9]*== at ' "$testdir"/valgrind-*.log 2>/dev/null; then + has_valgrind_error=1 + fi + fi + if test -n "$has_valgrind_error"; then + echo Valgrind detected an error, aborting. + stop_ice 0 + abort_tests + fi } check_log_error() { log="$1" - if grep -q "$2" "$testdir"/${log}.log; then + if cat_log_last_mark ${log} | grep -q "$2"; then + echo "Error, $log log contains error: $2" + stop_ice 0 + abort_tests + fi +} + +check_section_log_error() +{ + log="$1" + if cat_log_last_section ${log} | grep -q "$2"; then echo "Error, $log log contains error: $2" stop_ice 0 abort_tests @@ -920,7 +1586,7 @@ check_log_error() check_log_error_except() { log="$1" - if cat "$testdir"/${log}.log | grep -v "$3" | grep -q "$2" ; then + if cat_log_last_mark ${log} | grep -v "$3" | grep -q "$2" ; then echo "Error, $log log contains error: $2" stop_ice 0 abort_tests @@ -930,7 +1596,17 @@ check_log_error_except() check_log_message() { log="$1" - if ! grep -q "$2" "$testdir"/${log}.log; then + if ! 
cat_log_last_mark ${log} | grep -q "$2"; then + echo "Error, $log log does not contain: $2" + stop_ice 0 + abort_tests + fi +} + +check_section_log_message() +{ + log="$1" + if ! cat_log_last_section ${log} | grep -q "$2"; then echo "Error, $log log does not contain: $2" stop_ice 0 abort_tests @@ -941,7 +1617,7 @@ check_log_message_count() { log="$1" expected_count="$2" - count=`grep "$3" "$testdir"/${log}.log | wc -l` + count=$(cat_log_last_mark ${log} | grep "$3" | wc -l) if test $count -ne $expected_count; then echo "Error, $log log does not contain expected count (${count} vs ${expected_count}): $3" stop_ice 0 @@ -949,147 +1625,305 @@ check_log_message_count() fi } -buildnativetest +check_section_log_message_count() +{ + log="$1" + expected_count="$2" + count=$(cat_log_last_section ${log} | grep "$3" | wc -l) + if test $count -ne $expected_count; then + echo "Error, $log log does not contain expected count (${count} vs ${expected_count}): $3" + stop_ice 0 + abort_tests + fi +} -rm -f "$testdir"/scheduler_all.log -rm -f "$testdir"/localice_all.log -rm -f "$testdir"/remoteice1_all.log -rm -f "$testdir"/remoteice2_all.log -rm -f "$testdir"/icecc_all.log -rm -f "$testdir"/stderr_all.log -echo -n >"$testdir"/scheduler.log -echo -n >"$testdir"/localice.log -echo -n >"$testdir"/remoteice1.log -echo -n >"$testdir"/remoteice2.log -echo -n >"$testdir"/icecc.log -echo -n >"$testdir"/stderr.log -echo Starting icecream. +# ================================================================== +# Main code starts here +# ================================================================== + +echo + +check_compilers + stop_ice 2 +for log in $alltestlogs; do + rm -f "$testdir"/${log}.log + rm -f "$testdir"/${log}_section.log + rm -f "$testdir"/${log}_all.log + echo -n >"$testdir"/${log}.log +done +rm -f "$testdir"/valgrind-*.log 2>/dev/null + +buildnativetest + +echo Starting icecream. reset_logs local "Starting" start_ice check_logs_for_generic_errors echo Starting icecream successful. 
echo -if test -z "$chroot_disabled"; then - make_test 1 - make_test 2 +run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o + +run_ice "$testdir/plain.o" "remote" 0 $TESTCC -Wall -Werror -c plain.c -o "$testdir/"plain.o +run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror -c plain.cpp -O2 -o "$testdir/"plain.o +run_ice "$testdir/plain.ii" "local" 0 $TESTCXX -Wall -Werror -E plain.cpp -o "$testdir/"plain.ii +run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -c includes.cpp -o "$testdir"/includes.o +run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -c includes-without.cpp -include includes.h -o "$testdir"/includes.o +run_ice "$testdir/plain.o" "local" 0 $TESTCXX -Wall -Werror -c plain.cpp -mtune=native -o "$testdir"/plain.o +run_ice "$testdir/plain.o" "remote" 0 $TESTCC -Wall -Werror -x c++ -c plain -o "$testdir"/plain.o + +run_ice "$testdir/testdefine.o" "remote" 0 $TESTCXX -Wall -Werror -DICECREAM_TEST_DEFINE=test -c testdefine.cpp -o "$testdir/"testdefine.o +run_ice "$testdir/testdefine.o" "remote" 0 $TESTCXX -Wall -Werror -D ICECREAM_TEST_DEFINE=test -c testdefine.cpp -o "$testdir/"testdefine.o + +run_ice "" "remote" 300 "remoteabort" $TESTCXX -c nonexistent.cpp + +if test -e /bin/true; then + run_ice "" "local" 0 /bin/true +elif test -e /usr/bin/true; then + run_ice "" "local" 0 /usr/bin/true +else + skipped_tests="$skipped_tests run-true" fi -if test -z "$debug_fission_disabled"; then - run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $GXX -Wall -Werror -gsplit-dwarf -g -c plain.cpp -o "$testdir/"plain.o +run_ice "" "local" 300 "nostderrcheck" /bin/nonexistent-at-all-doesnt-exist + +run_ice "$testdir/warninginmacro.o" "remote" 0 $TESTCXX -Wall -Wextra -Werror -c warninginmacro.cpp -o "$testdir/"warninginmacro.o +run_ice "$testdir/unusedmacro.o" "remote" 0 "unusedmacrohack" $TESTCXX -Wall -Wunused-macros -c unusedmacro.cpp -o "$testdir/unusedmacro.o" + +if $TESTCXX -cxx-isystem ./ -fsyntax-only -Werror -c includes.cpp 2>/dev/null; then + run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -cxx-isystem ./ -c includes.cpp -o "$testdir"/includes.o +else + skipped_tests="$skipped_tests cxx-isystem" fi -run_ice "$testdir/plain.o" "remote" 0 $GXX -Wall -Werror -c plain.cpp -o "$testdir/"plain.o -if test -z "$debug_fission_disabled"; then - run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $GCC -Wall -Werror -gsplit-dwarf -c plain.c -o "$testdir/"plain.o - run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $GCC -Wall -Werror -gsplit-dwarf -c plain.c -o "../../../../../../../..$testdir/plain.o" +if test -n "$using_clang"; then + target=$($TESTCXX -dumpmachine) + run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror -target $target -c plain.cpp -o "$testdir"/plain.o + if test -z "$chroot_disabled"; then + check_section_log_message remoteice1 "remote compile arguments:.*-target $target" + run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror -c plain.cpp -o "$testdir"/plain.o + check_section_log_message remoteice1 "remote compile arguments:.*-target $target" + fi + run_ice "$testdir/plain.o" "remote" 0 $TESTCXX -Wall -Werror --target=$target -c plain.cpp -o "$testdir"/plain.o + if test -z "$chroot_disabled"; then + check_section_log_message remoteice1 "remote compile arguments:.*--target=$target" + check_section_log_error remoteice1 "remote compile arguments:.*-target $target" + fi +else + skipped_tests="$skipped_tests target" fi -run_ice "$testdir/plain.o" "remote" 0 $GCC -Wall -Werror 
-c plain.c -o "$testdir/"plain.o -run_ice "$testdir/plain.o" "remote" 0 $GXX -Wall -Werror -c plain.cpp -O2 -o "$testdir/"plain.o -run_ice "$testdir/plain.ii" "local" 0 $GXX -Wall -Werror -E plain.cpp -o "$testdir/"plain.ii -run_ice "$testdir/includes.o" "remote" 0 $GXX -Wall -Werror -c includes.cpp -o "$testdir"/includes.o -run_ice "$testdir/plain.o" "local" 0 $GXX -Wall -Werror -c plain.cpp -mtune=native -o "$testdir"/plain.o -run_ice "$testdir/plain.o" "remote" 0 $GCC -Wall -Werror -x c++ -c plain -o "$testdir"/plain.o -if test -z "$debug_fission_disabled"; then - run_ice "" "remote" 300 "split_dwarf" $GXX -gsplit-dwarf -c nonexistent.cpp + +debug_fission_disabled=1 +$TESTCXX -gsplit-dwarf true.cpp -o "$testdir"/true 2>/dev/null >/dev/null +if test $? -eq 0; then + "$testdir"/true + if test $? -eq 0; then + debug_fission_disabled= + fi + rm -f "$testdir"/true "$testdir"/true.dwo true.dwo fi -run_ice "" "remote" 300 $GXX -c nonexistent.cpp -run_ice "" "local" 0 /bin/true +if test -n "$debug_fission_disabled"; then + skipped_tests="$skipped_tests split-dwarf" +fi +if test -z "$debug_fission_disabled"; then + run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $TESTCXX -Wall -Werror -gsplit-dwarf -g -c plain.cpp -o "$testdir/"plain.o + run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $TESTCC -Wall -Werror -gsplit-dwarf -c plain.c -o "$testdir/"plain.o + run_ice "$testdir/plain.o" "remote" 0 "split_dwarf" $TESTCC -Wall -Werror -gsplit-dwarf -c plain.c -o "../../../../../../../..$testdir/plain.o" + run_ice "" "remote" 300 "split_dwarf" "remoteabort" $TESTCXX -gsplit-dwarf -c nonexistent.cpp +fi -if $GXX -E -fdiagnostics-show-caret messages.cpp >/dev/null 2>/dev/null; then - # gcc stderr workaround, icecream will force a local recompile - run_ice "" "remote" 1 "stderrfix" $GXX -c syntaxerror.cpp - run_ice "$testdir/messages.o" "remote" 0 "stderrfix" $GXX -Wall -c messages.cpp -o "$testdir"/messages.o - check_log_message stderr "warning: unused variable 'unused'" - # try again without the local recompile - run_ice "" "remote" 1 $GXX -c -fno-diagnostics-show-caret syntaxerror.cpp - run_ice "$testdir/messages.o" "remote" 0 $GXX -Wall -c -fno-diagnostics-show-caret messages.cpp -o "$testdir"/messages.o - check_log_message stderr "warning: unused variable 'unused'" +if test -z "$chroot_disabled"; then + if test -z "$using_gcc"; then + run_ice "" "remote" 1 $TESTCXX -c syntaxerror.cpp + check_section_log_error icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" + run_ice "$testdir/messages.o" "remote" 0 $TESTCXX -Wall -c messages.cpp -o "$testdir"/messages.o + check_log_message stderr "warning: unused variable 'unused'" + check_section_log_error icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" + else + if $TESTCXX -E -fdiagnostics-show-caret -Werror messages.cpp >/dev/null 2>/dev/null; then + # check gcc stderr workaround, icecream will force a local recompile + run_ice "" "remote" 1 "stderrfix" $TESTCXX -c -fdiagnostics-show-caret syntaxerror.cpp + run_ice "$testdir/messages.o" "remote" 0 "stderrfix" $TESTCXX -Wall -c -fdiagnostics-show-caret messages.cpp -o "$testdir"/messages.o + check_log_message stderr "warning: unused variable 'unused'" + check_section_log_message icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" + # try again without the local recompile + run_ice "" "remote" 1 
$TESTCXX -c -fno-diagnostics-show-caret syntaxerror.cpp + run_ice "$testdir/messages.o" "remote" 0 $TESTCXX -Wall -c -fno-diagnostics-show-caret messages.cpp -o "$testdir"/messages.o + check_log_message stderr "warning: unused variable 'unused'" + check_section_log_error icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" + else + # This gcc is too old to have this problem, but we do not check the gcc version in icecc. + run_ice "" "remote" 1 "stderrfix" $TESTCXX -c syntaxerror.cpp + check_section_log_message icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" + run_ice "$testdir/messages.o" "remote" 0 "stderrfix" $TESTCXX -Wall -c messages.cpp -o "$testdir"/messages.o + check_log_message stderr "warning: unused variable 'unused'" + check_section_log_message icecc "local build forced by remote exception: Error 102 - command needs stdout/stderr workaround, recompiling locally" + fi + fi else - run_ice "" "remote" 1 $GXX -c syntaxerror.cpp - run_ice "$testdir/messages.o" "remote" 0 $GXX -Wall -c messages.cpp -o "$testdir"/messages.o - check_log_message stderr "warning: unused variable 'unused'" + skipped_tests="$skipped_tests gcc-caret" fi if command -v gdb >/dev/null; then if command -v readelf >/dev/null; then - debug_test "$GXX" "-c -g debug.cpp" "Temporary breakpoint 1, main () at debug.cpp:8" + debug_test "$TESTCXX" "-c -g debug.cpp" "Temporary breakpoint 1, main () at debug.cpp:8" + debug_test "$TESTCXX" "-c -g $(pwd)/debug/debug2.cpp" "Temporary breakpoint 1, main () at $(pwd)/debug/debug2.cpp:8" if test -z "$debug_fission_disabled"; then - debug_test "$GXX" "-c -g debug.cpp -gsplit-dwarf" "Temporary breakpoint 1, main () at debug.cpp:8" - debug_test "$GXX" "-c -g `pwd`/debug/debug2.cpp -gsplit-dwarf" "Temporary breakpoint 1, main () at `pwd`/debug/debug2.cpp:8" + debug_test "$TESTCXX" "-c -g debug.cpp -gsplit-dwarf" "Temporary breakpoint 1, main () at debug.cpp:8" + debug_test "$TESTCXX" "-c -g $(pwd)/debug/debug2.cpp -gsplit-dwarf" "Temporary breakpoint 1, main () at $(pwd)/debug/debug2.cpp:8" fi - debug_test "$GXX" "-c -g `pwd`/debug/debug2.cpp" "Temporary breakpoint 1, main () at `pwd`/debug/debug2.cpp:8" fi else skipped_tests="$skipped_tests debug" fi -icerun_test +if $TESTCXX -fsanitize=address -Werror fsanitize.cpp -o /dev/null >/dev/null 2>/dev/null; then + run_ice "$testdir/fsanitize.o" "remote" 0 keepoutput $TESTCXX -c -fsanitize=address -g fsanitize.cpp -o "$testdir"/fsanitize.o + $TESTCXX -fsanitize=address -g "$testdir"/fsanitize.o -o "$testdir"/fsanitize 2>>"$testdir"/stderr.log + if test $? -ne 0; then + echo "Linking for -fsanitize test failed." + stop_ice 0 + abort_tests + fi + "$testdir"/fsanitize 2>>"$testdir"/stderr.log + check_log_message stderr "ERROR: AddressSanitizer: heap-use-after-free" + # Only newer versions of ASAN have the SUMMARY line. 
+ if grep -q "^SUMMARY:" "$testdir"/stderr.log; then + check_log_message stderr "SUMMARY: AddressSanitizer: heap-use-after-free .*fsanitize.cpp:5.* test_fsanitize_function()" + fi + rm "$testdir"/fsanitize.o + + if $TESTCXX -fsanitize=address -fsanitize-blacklist=fsanitize-blacklist.txt -c -fsyntax-only fsanitize.cpp >/dev/null 2>/dev/null; then + run_ice "" "local" 300 $TESTCXX -c -fsanitize=address -fsanitize-blacklist=nonexistent -g fsanitize.cpp -o "$testdir"/fsanitize.o + check_section_log_message icecc "file for argument -fsanitize-blacklist=nonexistent missing, building locally" + + run_ice "$testdir/fsanitize.o" "remote" 0 keepoutput $TESTCXX -c -fsanitize=address -fsanitize-blacklist=fsanitize-blacklist.txt -g fsanitize.cpp -o "$testdir"/fsanitize.o + $TESTCXX -fsanitize=address -fsanitize-blacklist=fsanitize-blacklist.txt -g "$testdir"/fsanitize.o -o "$testdir"/fsanitize 2>>"$testdir"/stderr.log + if test $? -ne 0; then + echo "Linking for -fsanitize test failed." + stop_ice 0 + abort_tests + fi + "$testdir"/fsanitize 2>>"$testdir"/stderr.log + check_log_error stderr "ERROR: AddressSanitizer: heap-use-after-free" + if grep -q "^SUMMARY:" "$testdir"/stderr.log; then + check_log_error stderr "SUMMARY: AddressSanitizer: heap-use-after-free .*fsanitize.cpp:5 in test()" + fi + rm "$testdir"/fsanitize.o + else + skipped_tests="$skipped_tests fsanitize-blacklist" + fi +else + skipped_tests="$skipped_tests fsanitize" +fi + +# test -frewrite-includes usage +$TESTCXX -E -Werror -frewrite-includes messages.cpp 2>/dev/null | grep -q '^# 1 "messages.cpp"$' >/dev/null 2>/dev/null +if test $? -eq 0; then + run_ice "$testdir/messages.o" "remote" 0 $TESTCXX -Wall -c messages.cpp -o "$testdir"/messages.o + check_log_message stderr "warning: unused variable 'unused'" +else + echo $TESTCXX does not provide functional -frewrite-includes, skipping test. + echo + skipped_tests="$skipped_tests clang_rewrite_includes" +fi + +run_ice "$testdir/includes.h.gch" "local" 0 "keepoutput" $TESTCXX -x c++-header -Wall -Werror -c includes.h -o "$testdir"/includes.h.gch +run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -c includes.cpp -include "$testdir"/includes.h -o "$testdir"/includes.o +if test -n "$using_clang"; then + run_ice "$testdir/includes.o" "remote" 0 $TESTCXX -Wall -Werror -c includes.cpp -include-pch "$testdir"/includes.h.gch -o "$testdir"/includes.o +fi +rm "$testdir"/includes.h.gch + +if test -n "$using_clang"; then + clangplugintest +else + skipped_tests="$skipped_tests clangplugin" +fi + +icerun_serialize_test +icerun_nopath_test +icerun_nocompile_test recursive_test +ccache_test + +symlink_wrapper_test + +if test -z "$chroot_disabled"; then + make_test +else + skipped_tests="$skipped_tests make_test" +fi + if test -z "$chroot_disabled"; then zero_local_jobs_test else skipped_tests="$skipped_tests zero_local_jobs_test" fi -if test -x $CLANGXX; then - # There's probably not much point in repeating all tests with Clang, but at least - # try it works (there's a different icecc-create-env run needed, and -frewrite-includes - # usage needs checking). - # Clang writes the input filename in the resulting .o , which means the outputs - # cannot match (remote node will use stdin for the input, while icecc always - # builds locally if it itself gets data from stdin). It'd be even worse with -g, - # since the -frewrite-includes transformation apparently makes the debugginfo - # differ too (although the end results work just as well). So just do not compare. 
- # It'd be still nice to check at least somehow that this really works though. - run_ice "" "remote" 0 $CLANGXX -Wall -Werror -c plain.cpp -o "$testdir"/plain.o - rm "$testdir"/plain.o - run_ice "" "remote" 0 $CLANGXX -Wall -Werror -c includes.cpp -o "$testdir"/includes.o - rm "$testdir"/includes.o - run_ice "" "remote" 0 $CLANGXX -Wall -Werror -cxx-isystem ./ -c includes.cpp -o "$testdir"/includes.o - rm "$testdir"/includes.o - run_ice "" "remote" 0 $CLANGXX -Wall -Werror -target x86_64-linux-gnu -c includes.cpp -o "$testdir"/includes.o - rm "$testdir"/includes.o - - # test -frewrite-includes usage - $CLANGXX -E -Werror -frewrite-includes messages.cpp | grep -q '^# 1 "messages.cpp"$' >/dev/null 2>/dev/null - if test $? -eq 0; then - run_ice "" "remote" 0 $CLANGXX -Wall -c messages.cpp -o "$testdir"/messages.o - check_log_message stderr "warning: unused variable 'unused'" - rm "$testdir"/messages.o - else - skipped_tests="$skipped_tests clang_rewrite_includes" - fi - - clang_debug_fission_disabled= - $CLANGXX -E -gsplit-dwarf messages.cpp 2>/dev/null >/dev/null || clang_debug_fission_disabled=1 - if test -n "$debug_fission_disabled"; then - skipped_tests="$skipped_tests split-dwarf(clang++)" - fi +if test -z "$chroot_disabled"; then + echo Testing different netnames. + reset_logs remote "Different netnames" + stop_ice 1 + # Start the secondary scheduler before the primary, so that besides the different netname it would be the preferred scheduler. + ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 \ + ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ + $valgrind "${icecc_scheduler}" -p 8769 -l "$testdir"/scheduler2.log -n ${netname}_secondary -v -v -v & + scheduler2_pid=$! + echo $scheduler2_pid > "$testdir"/scheduler2.pid + wait_for_ice_startup_complete scheduler2 + start_ice + check_log_message scheduler2 "Received scheduler announcement from .* (version $protocolversion, netname ${netname})" + check_log_error scheduler "has announced itself as a preferred scheduler, disconnecting all connections" + check_log_message localice "Ignoring scheduler at .*:8769 because of a different netname (${netname}_secondary)" + check_log_message remoteice1 "Ignoring scheduler at .*:8769 because of a different netname (${netname}_secondary)" + check_log_message remoteice2 "Ignoring scheduler at .*:8769 because of a different netname (${netname}_secondary)" + stop_secondary_scheduler 1 + echo Different netnames test successful. + echo - if command -v gdb >/dev/null; then - if command -v readelf >/dev/null; then - debug_test "$CLANGXX" "-c -g debug.cpp" "Temporary breakpoint 1, main () at debug.cpp:8" - if test -z "$clang_debug_fission_disabled"; then - debug_test "$CLANGXX" "-c -g debug.cpp -gsplit-dwarf" "Temporary breakpoint 1, main () at debug.cpp:8" - debug_test "$CLANGXX" "-c -g `pwd`/debug/debug2.cpp -gsplit-dwarf" "Temporary breakpoint 1, main () at `pwd`/debug/debug2.cpp:8" - fi - debug_test "$CLANGXX" "-c -g `pwd`/debug/debug2.cpp" "Temporary breakpoint 1, main () at `pwd`/debug/debug2.cpp:8" - fi - fi + echo Testing newer scheduler. + reset_logs remote "Newer scheduler" + # Make this scheduler fake its start time, so it should be the preferred scheduler. + # We could similarly fake the version to be higher, but this should be safer. 
+ ICECC_TESTS=1 ICECC_TEST_SCHEDULER_PORTS=8767:8769 ICECC_FAKE_STARTTIME=1 \ + ICECC_TEST_FLUSH_LOG_MARK="$testdir"/flush_log_mark.txt ICECC_TEST_LOG_HEADER="$testdir"/log_header.txt \ + $valgrind "${icecc_scheduler}" -p 8769 -l "$testdir"/scheduler2.log -n ${netname} -v -v -v & + scheduler2_pid=$! + echo $scheduler2_pid > "$testdir"/scheduler2.pid + wait_for_ice_startup_complete scheduler2 + # Give the primary scheduler time to disconnect all clients. + sleep 1 + check_log_message scheduler "Received scheduler announcement from .* (version $protocolversion, netname ${netname})" + check_log_message scheduler "has announced itself as a preferred scheduler, disconnecting all connections" + check_log_error scheduler2 "has announced itself as a preferred scheduler, disconnecting all connections" + check_log_message localice "scheduler closed connection" + check_log_message remoteice1 "scheduler closed connection" + check_log_message remoteice2 "scheduler closed connection" + # Daemons will not connect to the secondary debug scheduler (not implemented). + stop_secondary_scheduler 1 + echo Newer scheduler test successful. + echo - if test -n "$builddir" -a -f "$builddir"/clangplugin.so; then - clangplugintest - else - skipped_tests="$skipped_tests clangplugin" - fi + echo Testing reconnect. + reset_logs remote "Reconnect" + wait_for_ice_startup_complete localice remoteice1 remoteice2 + flush_logs + check_log_message scheduler "login localice protocol version: ${protocolversion}" + check_log_message scheduler "login remoteice1 protocol version: ${protocolversion}" + check_log_message scheduler "login remoteice2 protocol version: ${protocolversion}" + check_log_message localice "Connected to scheduler" + check_log_message remoteice1 "Connected to scheduler" + check_log_message remoteice2 "Connected to scheduler" + echo Reconnect test successful. + echo else - skipped_tests="$skipped_tests clang" + skipped_tests="$skipped_tests scheduler_multiple" fi reset_logs local "Closing down" @@ -1100,20 +1934,49 @@ reset_logs local "Starting only daemon" start_only_daemon # even without scheduler, icerun should still serialize, but still run up to local number of jobs in parallel -icerun_test "noscheduler" +icerun_serialize_test "noscheduler" reset_logs local "Closing down (only daemon)" stop_only_daemon 1 -finish_logs +buildnativewithsymlinktest +buildnativewithwrappertest if test -n "$valgrind"; then rm -f "$testdir"/valgrind-*.log fi +ignore= +if test -n "$using_gcc"; then + # gcc (as of now) doesn't know these options, ignore these tests if they fail + ignore="cxx-isystem target fsanitize-blacklist clangplugin clang_rewrite_includes" +elif test -n "$using_clang"; then + # clang seems to handle everything + ignore= +fi +ignored_tests= +for item in $ignore; do + if echo " $skipped_tests " | grep -q "$item"; then + ignored_tests="$ignored_tests $item" + skipped_tests="${skipped_tests/$item/}" + fi + skipped_tests=$(echo $skipped_tests | sed 's/ / /g' | sed 's/^ //') +done + +if test -n "$ignored_tests"; then + echo Ignored tests: $ignored_tests +fi + if test -n "$skipped_tests"; then - echo "All tests OK, some were skipped:$skipped_tests" - echo ============= + if test -n "$strict"; then + echo "All executed tests passed, but some were skipped: $skipped_tests" + echo "Strict mode enabled, failing." + echo ================================================== + exit 1 + else + echo "All tests OK, some were skipped: $skipped_tests" + echo ================================= + fi else echo All tests OK. 
echo ============= diff --git a/tests/testdefine.cpp b/tests/testdefine.cpp new file mode 100644 index 000000000..0ac5670d0 --- /dev/null +++ b/tests/testdefine.cpp @@ -0,0 +1,11 @@ +#ifndef ICECREAM_TEST_DEFINE +#error Failed. +#endif + +// this should expand to test() +void ICECREAM_TEST_DEFINE(); + +void test2() + { + test(); + } diff --git a/tests/true.cpp b/tests/true.cpp new file mode 100644 index 000000000..034217f18 --- /dev/null +++ b/tests/true.cpp @@ -0,0 +1,4 @@ +int main() + { + return 0; + } diff --git a/tests/unusedmacro.cpp b/tests/unusedmacro.cpp new file mode 100644 index 000000000..fdbd8e115 --- /dev/null +++ b/tests/unusedmacro.cpp @@ -0,0 +1,6 @@ +#define FOO bar +#define NUMBER 10 +int f() + { + return NUMBER; + } diff --git a/tests/unusedmacro1.txt b/tests/unusedmacro1.txt new file mode 100644 index 000000000..b08a10f1b --- /dev/null +++ b/tests/unusedmacro1.txt @@ -0,0 +1,3 @@ +unusedmacro.cpp:1:0: warning: macro "FOO" is not used [-Wunused-macros] + #define FOO bar + diff --git a/tests/unusedmacro2.txt b/tests/unusedmacro2.txt new file mode 100644 index 000000000..b8632fa58 --- /dev/null +++ b/tests/unusedmacro2.txt @@ -0,0 +1,3 @@ +unusedmacro.cpp:1:0: warning: macro "FOO" is not used [-Wunused-macros] + #define FOO bar + ^ diff --git a/tests/warninginmacro.cpp b/tests/warninginmacro.cpp new file mode 100644 index 000000000..bc472ffd6 --- /dev/null +++ b/tests/warninginmacro.cpp @@ -0,0 +1,9 @@ +// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=80369 + +#define MACRO if( arg != arg ) return 1; + +int f( int arg ) + { + MACRO + return 2; + }