# For a detailed description and guide on how best to use this Docker
# image, please look at the README. Note that this image is set up to
# pull many of its requirements from an S3 bucket rather than the
# public internet. Unless you want to build this Docker image
# yourself, we'd recommend just using it as-is with a docker pull.

# The image extends the base Jenkins CI image (always a pinned
# version, never latest...) and adds tooling that's specific to
# modern C++ development. It's expected that this image will be
# extended further for specific use cases - especially for embedded
# C++.

FROM jenkins/jenkins:2.110
LABEL maintainer="Mike Ritchie <mike@13coders.com>"
LABEL description="Docker base image for C++17 CI builds"

# These AWS credentials must be passed as build arguments to the
# docker build command. This is preferable to persisting credentials
# in the Docker image. Note that the credentials will be visible in
# your (host) shell history, so clear that down afterwards. Also use
# an IAM user with highly constrained privileges - ideally read-only
# access to the single S3 bucket containing the dependencies.

ARG AWS_ACCESS_KEY_ID
ARG AWS_SECRET_ACCESS_KEY
ARG AWS_DEFAULT_REGION
ARG AWS_BUCKET
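
# An illustrative build invocation (the image tag and all values here
# are placeholders - substitute your own):
#
#   docker build -t jenkins-cpp \
#     --build-arg AWS_ACCESS_KEY_ID=AKIA... \
#     --build-arg AWS_SECRET_ACCESS_KEY=... \
#     --build-arg AWS_DEFAULT_REGION=eu-west-1 \
#     --build-arg AWS_BUCKET=my-dependency-bucket .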

# Switch to root for installation of additional C++ tooling - we
# switch back to the jenkins user before the end of the image build.

USER root

# Update the apt repositories and install common development
# dependencies.

RUN apt-get update && apt-get install -y build-essential gcc-6 gdb \
    git valgrind python3-pip linux-perf google-perftools python-dev \
    zlib1g-dev lcov

# Install the AWS command line tools for fetching binaries from a
# private S3 bucket. The credentials are picked up from the
# build-time environment created by the ARG values passed on the
# docker build command line.

RUN pip3 install awscli

# Install Robot Framework (pybot) for BDD-style testing of C++
# libraries and applications.

RUN pip3 install robotframework
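
# For example, a test stage might run a suite along these lines (the
# suite path and options are illustrative only):
#
#   robot --outputdir results tests/robot/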

# Install the Conan package management system with pip.

RUN pip3 install conan
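
# For example, a build's dependencies might be fetched from its build
# directory with (illustrative only):
#
#   conan install .. --build=missing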

# Archive filenames for the tools used in this build, referenced
# later in the script at the points where assets are downloaded and
# extracted. Note that these are not expected to be passed as build
# args when invoking docker build - they're just a cheap-and-cheerful
# way of defining each filename only once.

ARG v_clang_llvm_bin=clang+llvm-5.0.1-x86_64-linux-gnu-debian8.tar.xz
ARG v_clang_llvm_src=llvm-5.0.1.src.tar.xz
ARG v_clang_libcxx_src=libcxx-5.0.1.src.tar.xz
ARG v_clang_libcxxabi_src=libcxxabi-5.0.1.src.tar.xz
ARG v_pmd=pmd-bin-6.0.1.zip
ARG v_boost=boost_1_65_1.tar.gz
ARG v_cmake=cmake-3.10.2-Linux-x86_64.tar.gz
ARG v_cppcheck=cppcheck-1.82.tar.gz

# Install a more recent CMake than the one packaged for Debian
# Stretch, from the binary distribution, into /usr.

RUN aws s3 cp s3://${AWS_BUCKET}/${v_cmake} .
RUN tar xf ${v_cmake} -C /usr --strip 1

# Install clang/llvm from the cached binary distribution.

RUN aws s3 cp s3://${AWS_BUCKET}/${v_clang_llvm_bin} .
RUN mkdir -p /opt/tools/clang-llvm
RUN tar xf ${v_clang_llvm_bin} -C /opt/tools/clang-llvm --strip 1

# Fetch the sources needed to compile an MSAN-sanitized version of
# libc++. The versions of these sources need to correspond exactly to
# the clang/llvm binary release we're using on the host (amd64) -
# 5.0.1 here.

RUN mkdir -p /opt/tools/llvm-build/projects/libcxx
RUN mkdir -p /opt/tools/llvm-build/projects/libcxxabi

RUN aws s3 cp s3://${AWS_BUCKET}/${v_clang_llvm_src} .
RUN tar xf ${v_clang_llvm_src} -C /opt/tools/llvm-build --strip 1

RUN aws s3 cp s3://${AWS_BUCKET}/${v_clang_libcxxabi_src} .
RUN tar xf ${v_clang_libcxxabi_src} -C /opt/tools/llvm-build/projects/libcxxabi --strip 1

RUN aws s3 cp s3://${AWS_BUCKET}/${v_clang_libcxx_src} .
RUN tar xf ${v_clang_libcxx_src} -C /opt/tools/llvm-build/projects/libcxx --strip 1

# Now build libc++ as an MSAN-instrumented library. It's necessary to
# link to both this and libc++abi when building the MSAN-instrumented
# binary for unit tests, otherwise many false positives will result
# from any use of the C++ Standard Library API. Building this needs a
# fairly complete clang/llvm source tree, although we only build the
# libcxx target within that tree.

RUN mkdir -p /opt/tools/libcxx-msan
RUN cd /opt/tools/libcxx-msan && cmake \
    -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_SANITIZER=Memory \
    -DCMAKE_C_COMPILER=/opt/tools/clang-llvm/bin/clang \
    -DCMAKE_CXX_COMPILER=/opt/tools/clang-llvm/bin/clang++ /opt/tools/llvm-build
RUN cd /opt/tools/libcxx-msan && make cxx cxxabi -j4
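
# Downstream builds producing MSAN-instrumented test binaries can then
# compile and link against this instrumented libc++. A rough sketch
# only - exact flags and paths depend on the project's own build setup:
#
#   clang++ -fsanitize=memory -stdlib=libc++ \
#     -I/opt/tools/libcxx-msan/include/c++/v1 \
#     -L/opt/tools/libcxx-msan/lib -Wl,-rpath,/opt/tools/libcxx-msan/lib \
#     -lc++abi test.cpp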

# Fetch, build and install a complete suite of Boost libraries to
# /usr/local. Note that 1.65.x is the last version that plays nicely
# with the FindBoost module shipped with this version of CMake - 1.66
# breaks compatibility.

RUN aws s3 cp s3://${AWS_BUCKET}/${v_boost} .
RUN mkdir -p boost
RUN tar xf ${v_boost} -C boost --strip 1
RUN cd boost && ./bootstrap.sh --prefix=/usr/local
RUN cd boost && ./b2
RUN cd boost && ./b2 install
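
# Downstream CMake projects can then locate this installation in the
# usual way - for example (component and target names are illustrative
# only):
#
#   find_package(Boost 1.65 REQUIRED COMPONENTS system filesystem)
#   target_link_libraries(my_target Boost::system Boost::filesystem)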

# Fetch, build and install the Google microbenchmark support library
# from sources.

RUN git clone https://github.com/google/benchmark.git -b v1.3.0
RUN mkdir -p benchmark/build
RUN cd benchmark/build && cmake .. -DCMAKE_BUILD_TYPE=Release
RUN cd benchmark/build && make
RUN cd benchmark/build && make install

# Fetch, build and install the Cppcheck static analysis tool, for
# anyone who hasn't yet switched to the Clang-based tools.

RUN aws s3 cp s3://${AWS_BUCKET}/${v_cppcheck} .
RUN mkdir -p cppcheck/build
RUN tar xf ${v_cppcheck} -C cppcheck --strip 1
RUN cd cppcheck/build && cmake .. -DCMAKE_BUILD_TYPE=Release
RUN cd cppcheck/build && make
RUN cd cppcheck/build && make install
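
# A pipeline stage might then invoke it along these lines (source
# directory and options are illustrative only):
#
#   cppcheck --enable=warning,style --xml src/ 2> cppcheck-result.xml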

# Fetch and install the (Java) CPD tool for detecting sections of
# duplicated code. PMD is only distributed (in binary form) as a zip
# file, and there's no --strip option for unzip, hence the small
# dance with the directory move.

RUN aws s3 cp s3://${AWS_BUCKET}/${v_pmd} .
RUN unzip ${v_pmd} -d /opt/tools
RUN mv /opt/tools/pmd-bin-* /opt/tools/pmd
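
# CPD can then be run from a pipeline stage roughly as follows (the
# token threshold and source path are illustrative only):
#
#   /opt/tools/pmd/bin/run.sh cpd --minimum-tokens 100 \
#     --language cpp --files src/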

# Done with everything that needs root permissions. Revert now to the
# Jenkins user.

USER jenkins

# Set the PATH to pick up the additional tools installed above. Using
# ENV (rather than a shell export) makes the setting persist for the
# jenkins user in the running container.

ENV PATH "$PATH:/opt/tools/clang-llvm/bin:/opt/tools/pmd/bin"

# Update the number of executors. A typical pipeline design here runs
# at most 4 parallel steps, one per sanitizer.

COPY executors.groovy /usr/share/jenkins/ref/init.groovy.d/executors.groovy
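
# The executors.groovy file is copied from the build context alongside
# this Dockerfile; a minimal version of such an init script might look
# like this (a sketch only - the real file may differ):
#
#   import jenkins.model.Jenkins
#   Jenkins.instance.setNumExecutors(4)
#   Jenkins.instance.save()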

# Install the additional Jenkins plugins needed for the pipeline
# design to operate. On creating and starting a new Jenkins instance
# from this Docker image, the usual startup screens will be displayed,
# but the "install plugins" page can be skipped (select "none" in the
# plugins list when prompted). These steps run as the jenkins user.

COPY plugins.txt /usr/share/jenkins/plugins.txt
RUN xargs /usr/local/bin/install-plugins.sh < /usr/share/jenkins/plugins.txt
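
# plugins.txt is copied from the build context and lists one plugin
# per line in the install-plugins.sh format, optionally pinned to a
# version. Illustrative entries only - see the repository for the
# actual list:
#
#   workflow-aggregator:2.5
#   git:3.8.0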