| #!/usr/bin/env python3 |
| # Copyright 2015-2016 Obsidian Research Corp. |
| # Licensed under BSD (MIT variant) or GPLv2. See COPYING. |
| # PYTHON_ARGCOMPLETE_OK |
| """cbuild - Build in a docker container |
| |
This script runs software builds inside docker containers, which allows
building for a wide range of distributions without having to install them.
| |
| Each target distribution has a base docker image and a set of packages to |
| install. The first step is to build the customized docker container: |
| |
| $ buildlib/cbuild build-images fedora |
| |
| This will download the base image and customize it with the required packages. |
| |
Next, a build can be performed 'in place'. This is useful for edit/compile
cycles against an alternate distribution.
| |
| $ buildlib/cbuild make fedora |
| |
The build output will be placed in build-fcXX, where XX is the latest Fedora
release.
| |
| Finally, a full package build can be performed inside the container. Note this |
| mode actually creates a source tree inside the container based on the current |
| git HEAD commit, so any uncommitted edits will be lost. |
| |
| $ buildlib/cbuild pkg fedora |
| |
| In this case only the final package results are copied outside the container |
| (to ..) and everything else is discarded. |
| |
In all cases the containers that are spun up are deleted after they are
finished; only the base image created during 'build-images' is kept. The
'--run-shell' option can be used to set up the container to the point of
running the build command and instead run an interactive bash shell. This is
useful for debugging certain kinds of build problems."""
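
# For example, to debug a package build interactively using the '--run-shell'
# option described above:
#   $ buildlib/cbuild pkg --run-shell fedora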
| |
| from __future__ import print_function |
| import argparse |
| import collections |
| import filecmp |
| import grp |
| import inspect |
| import json |
| import multiprocessing |
| import os |
| import pwd |
| import re |
| import shutil |
| import subprocess |
| import sys |
| import tempfile |
| import yaml |
| from contextlib import contextmanager; |
| |
| project = "rdma-core"; |
| |
| def get_version(): |
| """Return the version string for the project, this gets automatically written |
| into the packaging files.""" |
| with open("CMakeLists.txt","r") as F: |
| for ln in F: |
| g = re.match(r'^set\(PACKAGE_VERSION "(.+)"\)',ln) |
| if g is None: |
| continue; |
| return g.group(1); |
| raise RuntimeError("Could not find version"); |
| |
| class DockerFile(object): |
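    """Helper that accumulates the lines of a Dockerfile, seeded with FROM."""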
| def __init__(self,src): |
| self.lines = ["FROM %s"%(src)]; |
| |
| class Environment(object): |
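    """Base class for a build environment: names the docker parent image, the
    packages to install, and the per-distribution quirks the sub commands
    consult (pandoc availability, python command, pyverbs support, ...)."""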
| azp_images = None; |
| pandoc = True; |
| python_cmd = "python3"; |
| aliases = set(); |
| use_make = False; |
| proxy = True; |
| build_pyverbs = True; |
| docker_opts = [] |
| |
| to_azp = False; |
| |
| def _get_azp_names(self): |
| if Environment.azp_images: |
| return Environment.azp_images; |
| |
| with open("buildlib/azure-pipelines.yml") as F: |
| azp = yaml.safe_load(F) |
| Environment.azp_images = set(I["image"] for I in azp["resources"]["containers"]) |
| return Environment.azp_images; |
| |
| def image_name(self): |
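        """Return the docker image tag, e.g. "build-rdma-core/fc41" for local
        images, or the versioned ucfconsort.azurecr.io name for AZP images."""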
| if self.to_azp: |
| # Get the version number of the container out of the azp file. |
| prefix = "ucfconsort.azurecr.io/%s/%s:"%(project, self.name); |
| for I in self._get_azp_names(): |
| if I.startswith(prefix): |
| return I; |
| raise ValueError("Image is not used in buildlib/azure-pipelines.yml") |
| return "build-%s/%s"%(project,self.name); |
| |
| # ------------------------------------------------------------------------- |
| |
| class YumEnvironment(Environment): |
| is_rpm = True; |
| def get_docker_file(self,tmpdir): |
| res = DockerFile(self.docker_parent); |
| res.lines.append("RUN yum install -y %s && yum clean all"%( |
| " ".join(sorted(self.pkgs)))); |
| return res; |
| |
| class centos7(YumEnvironment): |
| docker_parent = "centos:7"; |
| pkgs = { |
| 'cmake', |
| 'gcc', |
| 'libnl3-devel', |
| 'libudev-devel', |
| 'make', |
| 'pkgconfig', |
| 'python', |
| 'python-argparse', |
| 'python-docutils', |
| 'rpm-build', |
| 'systemd-devel', |
| 'valgrind-devel', |
| } |
| name = "centos7"; |
| use_make = True; |
| pandoc = False; |
| build_pyverbs = False; |
| specfile = "redhat/rdma-core.spec"; |
| python_cmd = "python"; |
| to_azp = True; |
| |
| class centos7_epel(centos7): |
| pkgs = (centos7.pkgs - {"cmake","make"}) | { |
| "cmake3", |
| "ninja-build", |
| "pandoc", |
| "python34-setuptools", |
| 'python34-Cython', |
| 'python34-devel', |
| }; |
| name = "centos7_epel"; |
| build_pyverbs = True; |
| use_make = False; |
| pandoc = True; |
| ninja_cmd = "ninja-build"; |
| # Our spec file does not know how to cope with cmake3 |
| is_rpm = False; |
| to_azp = False; |
| |
| def get_docker_file(self,tmpdir): |
| res = YumEnvironment.get_docker_file(self,tmpdir); |
| res.lines.insert(1,"RUN yum install -y epel-release"); |
| res.lines.append("RUN ln -s /usr/bin/cmake3 /usr/local/bin/cmake && ln -sf /usr/bin/python3.4 /usr/bin/python3"); |
| return res; |
| |
| class amazonlinux2(YumEnvironment): |
| docker_parent = "amazonlinux:2"; |
| pkgs = centos7.pkgs; |
| name = "amazonlinux2"; |
| use_make = True; |
| pandoc = False; |
| build_pyverbs = False; |
| specfile = "redhat/rdma-core.spec"; |
| python_cmd = "python"; |
| to_azp = False; |
| |
| class centos8(Environment): |
| docker_parent = "quay.io/centos/centos:stream8" |
| pkgs = { |
| "pandoc", |
| "perl-generators", |
| "python3-Cython", |
| "python3-devel", |
| "python3-docutils", |
| 'cmake', |
| 'gcc', |
| 'libnl3-devel', |
| 'libudev-devel', |
| 'ninja-build', |
| 'pkgconfig', |
| 'rpm-build', |
| 'systemd-devel', |
| 'valgrind-devel', |
| }; |
| name = "centos8"; |
| specfile = "redhat/rdma-core.spec"; |
| is_rpm = True; |
| to_azp = True; |
| proxy = False; |
| |
| def get_docker_file(self,tmpdir): |
| res = DockerFile(self.docker_parent); |
| res.lines.append("RUN dnf config-manager --set-enabled powertools && " |
| "dnf install -y %s && dnf clean all" % |
| (" ".join(sorted(self.pkgs)))) |
| return res; |
| |
| class centos9(Environment): |
| docker_parent = "quay.io/centos/centos:stream9" |
| pkgs = centos8.pkgs |
| name = "centos9" |
| specfile = "redhat/rdma-core.spec" |
| ninja_cmd = "ninja-build" |
| is_rpm = True |
| to_azp = True |
| proxy = False |
| |
| def get_docker_file(self,tmpdir): |
| res = DockerFile(self.docker_parent); |
| res.lines.append("RUN dnf install -y 'dnf-command(config-manager)' epel-release &&" |
| "dnf config-manager --set-enabled crb && " |
| "dnf install -y %s && dnf clean all" % |
| (" ".join(sorted(self.pkgs)))) |
| return res |
| |
| class fc41(Environment): |
| docker_parent = "fedora:41"; |
| pkgs = centos8.pkgs | {"util-linux"} |
| name = "fc41"; |
| specfile = "redhat/rdma-core.spec"; |
| ninja_cmd = "ninja-build"; |
| is_rpm = True; |
| aliases = {"fedora"}; |
| to_azp = True; |
| |
| def get_docker_file(self,tmpdir): |
| res = DockerFile(self.docker_parent); |
| res.lines.append("RUN dnf install -y %s && dnf clean all"%( |
| " ".join(sorted(self.pkgs)))); |
| return res; |
| |
| # ------------------------------------------------------------------------- |
| |
| class APTEnvironment(Environment): |
| is_deb = True; |
| build_python = True; |
| def get_docker_file(self,tmpdir): |
| res = DockerFile(self.docker_parent); |
| res.lines.append("RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends %s && apt-get clean && rm -rf /usr/share/doc/ /usr/lib/debug /var/lib/apt/lists/"%( |
| " ".join(sorted(self.pkgs)))); |
| return res; |
| |
| def add_source_list(self,tmpdir,name,content): |
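        """Write a deb822 .sources file under <tmpdir>/etc/apt/sources.list.d/;
        the generated Dockerfile ADDs this etc/ tree into the image."""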
| sld = os.path.join(tmpdir,"etc","apt","sources.list.d"); |
| if not os.path.isdir(sld): |
| os.makedirs(sld); |
| with open(os.path.join(sld,name),"w") as F: |
| F.write(content + "\n"); |
| |
| def fix_https(self,tmpdir): |
| """The ubuntu image does not include ca-certificates, so if we want to use |
| HTTPS disable certificate validation.""" |
| cfgd = os.path.join(tmpdir,"etc","apt","apt.conf.d") |
| if not os.path.isdir(cfgd): |
| os.makedirs(cfgd) |
| with open(os.path.join(cfgd,"01nossl"),"w") as F: |
| F.write('Acquire::https { Verify-Peer "false"; };') |
| |
| class xenial(APTEnvironment): |
| docker_parent = "ubuntu:16.04" |
| pkgs = { |
| 'build-essential', |
| 'cmake', |
| 'debhelper', |
| 'dh-systemd', |
| 'fakeroot', # for AZP |
| 'gcc', |
| 'libnl-3-dev', |
| 'libnl-route-3-dev', |
| 'libsystemd-dev', |
| 'libudev-dev', |
| 'make', |
| 'ninja-build', |
| 'pandoc', |
| 'pkg-config', |
| 'python3', |
| 'python3-docutils', |
| 'valgrind', |
| }; |
| name = "ubuntu-16.04"; |
| aliases = {"xenial"}; |
| to_azp = True; |
| |
| class bionic(APTEnvironment): |
| docker_parent = "ubuntu:18.04" |
| pkgs = xenial.pkgs | { |
| 'cython3', |
| 'python3-dev', |
| }; |
| name = "ubuntu-18.04"; |
| aliases = {"bionic", "ubuntu"}; |
| to_azp = True |
| |
| class focal(APTEnvironment): |
| docker_parent = "ubuntu:20.04" |
| pkgs = bionic.pkgs | { |
| 'dh-python', |
| } |
| name = "ubuntu-20.04"; |
| aliases = {"focal", "ubuntu"}; |
| to_azp = True |
| |
| class jammy(APTEnvironment): |
| docker_parent = "ubuntu:22.04" |
    pkgs = (bionic.pkgs - {"dh-systemd"}) | {
| 'dh-python', |
| } |
| name = "ubuntu-22.04"; |
| aliases = {"jammy", "ubuntu"}; |
| |
| class jessie(APTEnvironment): |
| docker_parent = "debian:8" |
| pkgs = xenial.pkgs; |
| name = "debian-8"; |
| aliases = {"jessie"}; |
| build_pyverbs = False; |
| |
| class stretch(APTEnvironment): |
| docker_parent = "debian:9" |
| pkgs = bionic.pkgs; |
| name = "debian-9"; |
| aliases = {"stretch"}; |
| |
| class bullseye(APTEnvironment): |
| docker_parent = "debian:11" |
| pkgs = { |
| 'build-essential', |
| 'cmake', |
| 'debhelper', |
| 'fakeroot', # for AZP |
| 'gcc', |
| 'libnl-3-dev', |
| 'libnl-route-3-dev', |
| 'libsystemd-dev', |
| 'libudev-dev', |
| 'make', |
| 'ninja-build', |
| 'pandoc', |
| 'pkg-config', |
| 'python3', |
| 'python3-docutils', |
| 'valgrind', |
| }; |
| name = "debian-11"; |
| aliases = {"bullseye"}; |
| |
| class bullseye_i386(APTEnvironment): |
| docker_parent = "debian:11" |
| pkgs = bullseye.pkgs | {"nodejs"} |
| name = "debian-11-i386"; |
| aliases = {"bullseye_i386"}; |
| docker_opts = ["--platform","linux/386"] |
| to_azp = True |
| |
| def get_docker_file(self,tmpdir): |
| res = json.loads(docker_cmd_str(args,"manifest","inspect",self.docker_parent)) |
| |
| # Docker is somewhat obnoxious in how it handles the multi-platform |
| # images since it does not store the manifest locally and thus |
| # overwrites the local tag. Figure out the tag we want by hash and |
| # use it directly. Docker will cache this. |
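        # Each manifest entry looks roughly like:
        #  {"digest": "sha256:...", "platform": {"architecture": "386", "os": "linux"}}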
| for image in res["manifests"]: |
| platform = image["platform"] |
| if platform["architecture"] == "386" and platform["os"] == "linux": |
| base = f"{self.docker_parent}@{image['digest']}" |
| break |
| else: |
| raise RuntimeError("Docker manifest failed"); |
| res = DockerFile(base); |
| res.lines.append("RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends %s && apt-get clean && rm -rf /usr/share/doc/ /usr/lib/debug /var/lib/apt/lists/"%( |
| " ".join(sorted(self.pkgs)))); |
| res.lines.append('LABEL "com.azure.dev.pipelines.agent.handler.node.path"="/usr/bin/node"') |
| return res; |
| |
| class debian_experimental(APTEnvironment): |
| docker_parent = "debian:experimental" |
    pkgs = (stretch.pkgs - {"gcc"}) | {"gcc-9"};
| name = "debian-experimental"; |
| |
| def get_docker_file(self,tmpdir): |
| res = DockerFile(self.docker_parent); |
| res.lines.append("RUN apt-get update && apt-get -t experimental install -y --no-install-recommends %s && apt-get clean"%( |
| " ".join(sorted(self.pkgs)))); |
| return res; |
| |
| # ------------------------------------------------------------------------- |
| |
| class ZypperEnvironment(Environment): |
| proxy = False; |
| is_rpm = True; |
| def get_docker_file(self,tmpdir): |
| res = DockerFile(self.docker_parent); |
| res.lines.append("RUN zypper --non-interactive refresh"); |
| res.lines.append("RUN zypper --non-interactive dist-upgrade"); |
| res.lines.append("RUN zypper --non-interactive install %s"%( |
| " ".join(sorted(self.pkgs)))); |
| return res; |
| |
| class leap(ZypperEnvironment): |
| docker_parent = "opensuse/leap:15.0"; |
| specfile = "suse/rdma-core.spec"; |
| pkgs = { |
| 'cmake', |
| 'gcc', |
| 'libnl3-devel', |
| 'libudev-devel', |
| 'udev', |
| 'make', |
| 'ninja', |
| 'pandoc', |
| 'pkg-config', |
| 'python3', |
| 'rpm-build', |
| 'systemd-devel', |
| 'valgrind-devel', |
| 'python3-Cython', |
| 'python3-devel', |
| 'python3-docutils', |
| }; |
| rpmbuild_options = [ "--without=curlmini" ]; |
| to_azp = True; |
| name = "opensuse-15.0"; |
| aliases = {"leap"}; |
| |
| class tumbleweed(ZypperEnvironment): |
| docker_parent = "opensuse/tumbleweed:latest"; |
    pkgs = (leap.pkgs - {"valgrind-devel"}) | {
| "valgrind-client-headers", |
| "perl" |
| }; |
| name = "tumbleweed"; |
| specfile = "suse/rdma-core.spec"; |
| rpmbuild_options = [ "--without=curlmini" ]; |
| |
| # ------------------------------------------------------------------------- |
| |
| class azure_pipelines(APTEnvironment): |
| docker_parent = "ubuntu:22.04" |
| pkgs = { |
| "abi-compliance-checker", |
| "abi-dumper", |
| "ca-certificates", |
| "clang-15", |
| "cmake", |
| "cython3", |
| "debhelper", |
| "dh-python", |
| "dpkg-dev", |
| "fakeroot", |
| "gcc-12", |
| "git", |
| "libc6-dev", |
| "libnl-3-dev", |
| "libnl-route-3-dev", |
| "libsystemd-dev", |
| "libudev-dev", |
| "lintian", |
| "make", |
| "ninja-build", |
| "pandoc", |
| "pkg-config", |
| "python3", |
| "python3-dev", |
| "python3-docutils", |
| "python3-pkg-resources", |
| "python3-yaml", |
| "sparse", |
| "valgrind", |
| } | { |
| # ARM 64 cross compiler |
| "gcc-12-aarch64-linux-gnu", |
| "libgcc-12-dev:arm64", |
| "libc6-dev:arm64", |
| "libnl-3-dev:arm64", |
| "libnl-route-3-dev:arm64", |
| "libsystemd-dev:arm64", |
| "libudev-dev:arm64", |
| } | { |
| # PPC 64 cross compiler |
| "gcc-12-powerpc64le-linux-gnu", |
| "libgcc-12-dev:ppc64el", |
| "libc6-dev:ppc64el", |
| "libnl-3-dev:ppc64el", |
| "libnl-route-3-dev:ppc64el", |
| "libsystemd-dev:ppc64el", |
| "libudev-dev:ppc64el", |
| } |
| to_azp = True; |
| name = "azure_pipelines"; |
| aliases = {"azp"} |
| |
| llvm_sources = """ |
| Types: deb |
| URIs: http://apt.llvm.org/jammy/ |
| Suites: llvm-toolchain-jammy-15 |
| Components: main |
| Architectures: amd64 |
| Signed-By: |
| -----BEGIN PGP PUBLIC KEY BLOCK----- |
| Version: GnuPG v1.4.12 (GNU/Linux) |
| . |
| mQINBFE9lCwBEADi0WUAApM/mgHJRU8lVkkw0CHsZNpqaQDNaHefD6Rw3S4LxNmM |
| EZaOTkhP200XZM8lVdbfUW9xSjA3oPldc1HG26NjbqqCmWpdo2fb+r7VmU2dq3NM |
| R18ZlKixiLDE6OUfaXWKamZsXb6ITTYmgTO6orQWYrnW6ckYHSeaAkW0wkDAryl2 |
| B5v8aoFnQ1rFiVEMo4NGzw4UX+MelF7rxaaregmKVTPiqCOSPJ1McC1dHFN533FY |
| Wh/RVLKWo6npu+owtwYFQW+zyQhKzSIMvNujFRzhIxzxR9Gn87MoLAyfgKEzrbbT |
| DhqqNXTxS4UMUKCQaO93TzetX/EBrRpJj+vP640yio80h4Dr5pAd7+LnKwgpTDk1 |
| G88bBXJAcPZnTSKu9I2c6KY4iRNbvRz4i+ZdwwZtdW4nSdl2792L7Sl7Nc44uLL/ |
| ZqkKDXEBF6lsX5XpABwyK89S/SbHOytXv9o4puv+65Ac5/UShspQTMSKGZgvDauU |
| cs8kE1U9dPOqVNCYq9Nfwinkf6RxV1k1+gwtclxQuY7UpKXP0hNAXjAiA5KS5Crq |
| 7aaJg9q2F4bub0mNU6n7UI6vXguF2n4SEtzPRk6RP+4TiT3bZUsmr+1ktogyOJCc |
| Ha8G5VdL+NBIYQthOcieYCBnTeIH7D3Sp6FYQTYtVbKFzmMK+36ERreL/wARAQAB |
| tD1TeWx2ZXN0cmUgTGVkcnUgLSBEZWJpYW4gTExWTSBwYWNrYWdlcyA8c3lsdmVz |
| dHJlQGRlYmlhbi5vcmc+iQI4BBMBAgAiBQJRPZQsAhsDBgsJCAcDAgYVCAIJCgsE |
| FgIDAQIeAQIXgAAKCRAVz00Yr090Ibx+EADArS/hvkDF8juWMXxh17CgR0WZlHCC |
| 9CTBWkg5a0bNN/3bb97cPQt/vIKWjQtkQpav6/5JTVCSx2riL4FHYhH0iuo4iAPR |
| udC7Cvg8g7bSPrKO6tenQZNvQm+tUmBHgFiMBJi92AjZ/Qn1Shg7p9ITivFxpLyX |
| wpmnF1OKyI2Kof2rm4BFwfSWuf8Fvh7kDMRLHv+MlnK/7j/BNpKdozXxLcwoFBmn |
| l0WjpAH3OFF7Pvm1LJdf1DjWKH0Dc3sc6zxtmBR/KHHg6kK4BGQNnFKujcP7TVdv |
| gMYv84kun14pnwjZcqOtN3UJtcx22880DOQzinoMs3Q4w4o05oIF+sSgHViFpc3W |
| R0v+RllnH05vKZo+LDzc83DQVrdwliV12eHxrMQ8UYg88zCbF/cHHnlzZWAJgftg |
| hB08v1BKPgYRUzwJ6VdVqXYcZWEaUJmQAPuAALyZESw94hSo28FAn0/gzEc5uOYx |
| K+xG/lFwgAGYNb3uGM5m0P6LVTfdg6vDwwOeTNIExVk3KVFXeSQef2ZMkhwA7wya |
| KJptkb62wBHFE+o9TUdtMCY6qONxMMdwioRE5BYNwAsS1PnRD2+jtlI0DzvKHt7B |
| MWd8hnoUKhMeZ9TNmo+8CpsAtXZcBho0zPGz/R8NlJhAWpdAZ1CmcPo83EW86Yq7 |
| BxQUKnNHcwj2ebkCDQRRPZQsARAA4jxYmbTHwmMjqSizlMJYNuGOpIidEdx9zQ5g |
| zOr431/VfWq4S+VhMDhs15j9lyml0y4ok215VRFwrAREDg6UPMr7ajLmBQGau0Fc |
| bvZJ90l4NjXp5p0NEE/qOb9UEHT7EGkEhaZ1ekkWFTWCgsy7rRXfZLxB6sk7pzLC |
| DshyW3zjIakWAnpQ5j5obiDy708pReAuGB94NSyb1HoW/xGsGgvvCw4r0w3xPStw |
| F1PhmScE6NTBIfLliea3pl8vhKPlCh54Hk7I8QGjo1ETlRP4Qll1ZxHJ8u25f/ta |
| RES2Aw8Hi7j0EVcZ6MT9JWTI83yUcnUlZPZS2HyeWcUj+8nUC8W4N8An+aNps9l/ |
| 21inIl2TbGo3Yn1JQLnA1YCoGwC34g8QZTJhElEQBN0X29ayWW6OdFx8MDvllbBV |
| ymmKq2lK1U55mQTfDli7S3vfGz9Gp/oQwZ8bQpOeUkc5hbZszYwP4RX+68xDPfn+ |
| M9udl+qW9wu+LyePbW6HX90LmkhNkkY2ZzUPRPDHZANU5btaPXc2H7edX4y4maQa |
| xenqD0lGh9LGz/mps4HEZtCI5CY8o0uCMF3lT0XfXhuLksr7Pxv57yue8LLTItOJ |
| d9Hmzp9G97SRYYeqU+8lyNXtU2PdrLLq7QHkzrsloG78lCpQcalHGACJzrlUWVP/ |
| fN3Ht3kAEQEAAYkCHwQYAQIACQUCUT2ULAIbDAAKCRAVz00Yr090IbhWEADbr50X |
| OEXMIMGRLe+YMjeMX9NG4jxs0jZaWHc/WrGR+CCSUb9r6aPXeLo+45949uEfdSsB |
| pbaEdNWxF5Vr1CSjuO5siIlgDjmT655voXo67xVpEN4HhMrxugDJfCa6z97P0+ML |
| PdDxim57uNqkam9XIq9hKQaurxMAECDPmlEXI4QT3eu5qw5/knMzDMZj4Vi6hovL |
| wvvAeLHO/jsyfIdNmhBGU2RWCEZ9uo/MeerPHtRPfg74g+9PPfP6nyHD2Wes6yGd |
| oVQwtPNAQD6Cj7EaA2xdZYLJ7/jW6yiPu98FFWP74FN2dlyEA2uVziLsfBrgpS4l |
| tVOlrO2YzkkqUGrybzbLpj6eeHx+Cd7wcjI8CalsqtL6cG8cUEjtWQUHyTbQWAgG |
| 5VPEgIAVhJ6RTZ26i/G+4J8neKyRs4vz+57UGwY6zI4AB1ZcWGEE3Bf+CDEDgmnP |
| LSwbnHefK9IljT9XU98PelSryUO/5UPw7leE0akXKB4DtekToO226px1VnGp3Bov |
| 1GBGvpHvL2WizEwdk+nfk8LtrLzej+9FtIcq3uIrYnsac47Pf7p0otcFeTJTjSq3 |
| krCaoG4Hx0zGQG2ZFpHrSrZTVy6lxvIdfi0beMgY6h78p6M9eYZHQHc02DjFkQXN |
| bXb5c6gCHESH5PXwPU4jQEE7Ib9J6sbk7ZT2Mw== |
| =j+4q |
| -----END PGP PUBLIC KEY BLOCK----- |
| """ |
| |
| gcc12_sources = """ |
| Types: deb |
| URIs: https://ppa.launchpadcontent.net/ubuntu-toolchain-r/test/ubuntu |
| Suites: jammy |
| Components: main |
| Architectures: amd64 arm64 ppc64el |
| Signed-By: |
| -----BEGIN PGP PUBLIC KEY BLOCK----- |
| . |
| xo0ESuBvRwEEAMi4cDba7xlKaaoXjO1n1HX8RKrkW+HEIl79nSOSJyvzysajs7zU |
| ow/OzCQp9NswqrDmNuH1+lPTTRNAGtK8r2ouq2rnXT1mTl23dpgHZ9spseR73s4Z |
| BGw/ag4bpU5dNUStvfmHhIjVCuiSpNn7cyy1JSSvSs3N2mxteKjXLBf7ABEBAAHN |
| GkxhdW5jaHBhZCBUb29sY2hhaW4gYnVpbGRzwrYEEwECACAFAkrgb0cCGwMGCwkI |
| BwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAek3eiup7yfzGKA/4xzUqNACSlB+k+DxFF |
| HqkwKa/ziFiAlkLQyyhm+iqz80htRZr7Ls/ZRYZl0aSU56/hLe0V+TviJ1s8qdN2 |
| lamkKdXIAFfavA04nOnTzyIBJ82EAUT3Nh45skMxo4z4iZMNmsyaQpNl/m/lNtOL |
| hR64v5ZybofB2EWkMxUzX8D/FQ== |
| =xe+/ |
| -----END PGP PUBLIC KEY BLOCK----- |
| """ |
| |
| ports_sources = """ |
| Types: deb |
URIs: http://ports.ubuntu.com
| Suites: jammy jammy-security jammy-updates |
| Components: main universe |
| Architectures: arm64 ppc64el |
| """ |
| |
| amd64_sources = """ |
| Types: deb |
URIs: http://archive.ubuntu.com/ubuntu
| Suites: jammy jammy-security jammy-updates |
| Components: main universe |
| Architectures: amd64 |
| """ |
| |
| def get_docker_file(self,tmpdir): |
| res = focal.get_docker_file(self,tmpdir); |
| self.fix_https(tmpdir) |
| |
| self.add_source_list(tmpdir, "llvm.sources", self.llvm_sources) |
| self.add_source_list(tmpdir, |
| "ubuntu-toolchain-r-ubuntu-test-jammy.sources", |
| self.gcc12_sources) |
| self.add_source_list(tmpdir, "ports.sources", self.ports_sources) |
| |
| # Replace the main sources so we can limit the architecture |
| self.add_source_list(tmpdir, "amd64.sources", self.amd64_sources) |
| |
| res.lines.insert(1,"ADD etc/ /etc/"); |
| res.lines.insert(1,"RUN rm /etc/apt/sources.list && " |
| "dpkg --add-architecture ppc64el && " |
| "dpkg --add-architecture arm64") |
| return res; |
| |
| # ------------------------------------------------------------------------- |
| |
| environments = [centos7(), |
| centos7_epel(), |
| centos8(), |
| centos9(), |
| amazonlinux2(), |
| xenial(), |
| bionic(), |
| focal(), |
| jammy(), |
| jessie(), |
| stretch(), |
| fc41(), |
| leap(), |
| tumbleweed(), |
| debian_experimental(), |
| azure_pipelines(), |
| bullseye(), |
| bullseye_i386(), |
| ]; |
| |
| class ToEnvActionPkg(argparse.Action): |
| """argparse helper to parse environment lists into environment classes""" |
| def __call__(self, parser, namespace, values, option_string=None): |
| if not isinstance(values,list): |
| values = [values]; |
| |
| res = set(); |
| for I in values: |
| if I == "all": |
| for env in environments: |
| if env.name != "centos7_epel": |
| res.add(env); |
| else: |
| for env in environments: |
| if env.name == I or I in env.aliases: |
| res.add(env); |
| setattr(namespace, self.dest, sorted(res,key=lambda x:x.name)) |
| |
| |
| class ToEnvAction(argparse.Action): |
| """argparse helper to parse environment lists into environment classes""" |
| def __call__(self, parser, namespace, values, option_string=None): |
| if not isinstance(values,list): |
| values = [values]; |
| |
| res = set(); |
| for I in values: |
| if I == "all": |
| res.update(environments); |
| else: |
| for env in environments: |
| if env.name == I or I in env.aliases: |
| res.add(env); |
| setattr(namespace, self.dest, sorted(res,key=lambda x:x.name)) |
| |
| def env_choices_pkg(): |
| """All the names that can be used with ToEnvAction""" |
| envs = set(("all",)); |
| for I in environments: |
| if getattr(I,"is_deb",False) or getattr(I,"is_rpm",False): |
| envs.add(I.name); |
| envs.update(I.aliases); |
| return envs; |
| |
| def env_choices(): |
| """All the names that can be used with ToEnvAction""" |
| envs = set(("all",)); |
| for I in environments: |
| envs.add(I.name); |
| envs.update(I.aliases); |
| return envs; |
| |
def docker_cmd(args,*cmd):
    """Invoke docker"""
    cmd = list(cmd);
    if args.sudo:
        return subprocess.check_call(["sudo","docker"] + cmd);
    return subprocess.check_call(["docker"] + cmd);

def docker_cmd_str(args,*cmd):
    """Invoke docker and return its stdout as a string"""
    cmd = list(cmd);
    if args.sudo:
        return subprocess.check_output(["sudo","docker"] + cmd).decode();
    return subprocess.check_output(["docker"] + cmd).decode();
| |
| @contextmanager |
| def private_tmp(args): |
| """Simple version of Python 3's tempfile.TemporaryDirectory""" |
| dfn = tempfile.mkdtemp(); |
| try: |
| yield dfn; |
| finally: |
| try: |
| shutil.rmtree(dfn); |
| except: |
| # The debian builds result in root owned files because we don't use fakeroot |
| subprocess.check_call(['sudo','rm','-rf',dfn]); |
| |
| @contextmanager |
| def inDirectory(dir): |
| cdir = os.getcwd(); |
| try: |
| os.chdir(dir); |
| yield True; |
| finally: |
| os.chdir(cdir); |
| |
| def map_git_args(src_root,to): |
| """Return a list of docker arguments that will map the .git directory into the |
| container""" |
| git_dir = subprocess.check_output([ |
| "git", |
| "-C", src_root, |
| "rev-parse", |
| "--absolute-git-dir", |
| ]).decode().strip() |
| if ".git/worktrees" in git_dir: |
| with open(os.path.join(git_dir, "commondir")) as F: |
| git_dir = os.path.join(git_dir, F.read().strip()) |
| git_dir = os.path.abspath(git_dir) |
| res = ["-v", "%s:%s:ro" % (os.path.join(src_root, ".git"), os.path.join(to, ".git")), |
| "-v", "%s:%s:ro" % (git_dir, git_dir)] |
| else: |
| res = ["-v", "%s:%s:ro" % (git_dir, os.path.join(to, ".git"))] |
| |
| alternates = os.path.join(git_dir, "objects/info/alternates") |
| if os.path.exists(alternates): |
| with open(alternates) as F: |
| for I in F.readlines(): |
| I = os.path.normpath(I.strip()) |
| res.extend(["-v","%s:%s:ro"%(I,I)]); |
| |
| return res; |
| |
| def get_image_id(args,image_name): |
| img = json.loads(docker_cmd_str(args,"inspect",image_name)); |
| image_id = img[0]["Id"]; |
    # Newer docker versions prefix the Id with the digest algorithm,
    # eg "sha256:<hex>"; strip the prefix.
    if ":" in image_id:
        image_id = image_id.partition(':')[2];
| return image_id; |
| |
| # ------------------------------------------------------------------------- |
| |
| def get_tar_file(args,tarfn,pandoc_prebuilt=False): |
| """Create a tar file that matches what buildlib/github-release would do if it |
| was a tagged release""" |
| prefix = "%s-%s/"%(project,get_version()); |
| if not pandoc_prebuilt: |
| subprocess.check_call(["git","archive", |
| # This must match the prefix generated buildlib/github-release |
| "--prefix",prefix, |
| "--output",tarfn, |
| "HEAD"]); |
| return; |
| |
    # When the OS does not support pandoc we go through the extra step of
    # building the pandoc output in the azp container and including it in
    # the tar.
| if not args.use_prebuilt_pandoc: |
| subprocess.check_call(["buildlib/cbuild","make","azure_pipelines","docs"]); |
| |
| cmd_make_dist_tar(argparse.Namespace(BUILD="build-azure_pipelines",tarfn=tarfn, |
| script_pwd="",tag=None)); |
| |
| def run_rpm_build(args,spec_file,env): |
| with open(spec_file,"r") as F: |
| for ln in F: |
| if ln.startswith("Version:"): |
| ver = ln.strip().partition(' ')[2].strip(); |
| assert(ver == get_version()); |
| |
| if ln.startswith("Source:"): |
| tarfn = ln.strip().partition(' ')[2].strip(); |
| |
| image_id = get_image_id(args,env.image_name()); |
| with private_tmp(args) as tmpdir: |
| os.mkdir(os.path.join(tmpdir,"SOURCES")); |
| os.mkdir(os.path.join(tmpdir,"tmp")); |
| |
| get_tar_file(args,os.path.join(tmpdir,"SOURCES",tarfn), |
| pandoc_prebuilt=not env.pandoc); |
| |
| with open(spec_file,"r") as inF: |
| spec = list(inF); |
| tspec_file = os.path.basename(spec_file); |
| with open(os.path.join(tmpdir,tspec_file),"w") as outF: |
| outF.write("".join(spec)); |
| |
| home = os.path.join(os.path.sep,"home",os.getenv("LOGNAME")); |
| vdir = os.path.join(home,"rpmbuild"); |
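        # The private tmpdir is bind-mounted as ~/rpmbuild inside the
        # container, so rpmbuild's standard topdir layout (SOURCES/, RPMS/,
        # ...) lands back in our tmpdir on the host.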
| |
| opts = [ |
| "run", |
| "--rm=true", |
| "-v","%s:%s"%(tmpdir,vdir), |
| "-w",vdir, |
| "-h","builder-%s"%(image_id[:12]), |
| "-e","HOME=%s"%(home), |
| "-e","TMPDIR=%s"%(os.path.join(vdir,"tmp")), |
| ]; |
| |
| # rpmbuild complains if we do not have an entry in passwd and group |
| # for the user we are going to use to do the build. |
| with open(os.path.join(tmpdir,"go.py"),"w") as F: |
| print(""" |
| import os,subprocess; |
| with open("/etc/passwd","a") as F: |
| F.write({passwd!r} + "\\n"); |
| with open("/etc/group","a") as F: |
| F.write({group!r} + "\\n"); |
| os.setgid({gid:d}); |
| os.setuid({uid:d}); |
| |
| # Get RPM to tell us the expected tar filename. |
| for ln in subprocess.check_output(["rpmspec","-P",{tspec_file!r}]).splitlines(): |
| if ln.startswith(b"Source:"): |
| tarfn = ln.strip().partition(b' ')[2].strip(); |
| if tarfn != {tarfn!r}: |
| os.symlink({tarfn!r},os.path.join(b"SOURCES",tarfn)); |
| """.format(passwd=":".join(str(I) for I in pwd.getpwuid(os.getuid())), |
| group=":".join(str(I) for I in grp.getgrgid(os.getgid())), |
| uid=os.getuid(), |
| gid=os.getgid(), |
| tarfn=tarfn, |
| tspec_file=tspec_file), file=F); |
| |
| extra_opts = getattr(env,"rpmbuild_options", []) |
| bopts = ["-bb",tspec_file] + extra_opts; |
| for arg in args.with_flags: |
| bopts.extend(["--with", arg]); |
| for arg in args.without_flags: |
| bopts.extend(["--without", arg]); |
| if "pyverbs" not in args.with_flags + args.without_flags: |
| if env.build_pyverbs: |
| bopts.extend(["--with", "pyverbs"]); |
| |
| print('os.execlp("rpmbuild","rpmbuild",%s)'%( |
| ",".join(repr(I) for I in bopts)), file=F); |
| |
| if args.run_shell: |
| opts.append("-ti"); |
| opts.append(env.image_name()); |
| |
| if args.run_shell: |
| opts.append("/bin/bash"); |
| else: |
| opts.extend([env.python_cmd,"go.py"]); |
| |
| docker_cmd(args,*opts) |
| |
| print() |
| for path,jnk,files in os.walk(os.path.join(tmpdir,"RPMS")): |
| for I in files: |
| print("Final RPM: ",os.path.join("..",I)); |
| shutil.move(os.path.join(path,I), |
| os.path.join("..",I)); |
| |
| def run_deb_build(args,env): |
| image_id = get_image_id(args,env.image_name()); |
| with private_tmp(args) as tmpdir: |
| os.mkdir(os.path.join(tmpdir,"src")); |
| os.mkdir(os.path.join(tmpdir,"tmp")); |
| |
| opwd = os.getcwd(); |
| with inDirectory(os.path.join(tmpdir,"src")): |
| subprocess.check_call(["git", |
| "--git-dir",os.path.join(opwd,".git"), |
| "reset","--hard","HEAD"]); |
| |
| home = os.path.join(os.path.sep,"home",os.getenv("LOGNAME")); |
| |
| opts = [ |
| "run", |
| "--read-only", |
| "--rm=true", |
| "-v","%s:%s"%(tmpdir,home), |
| "-w",os.path.join(home,"src"), |
| "-h","builder-%s"%(image_id[:12]), |
| "-e","HOME=%s"%(home), |
| "-e","TMPDIR=%s"%(os.path.join(home,"tmp")), |
| "-e","DEB_BUILD_OPTIONS=parallel=%u"%(multiprocessing.cpu_count()), |
| ]; |
| |
| # Create a go.py that will let us run the compilation as the user and |
| # then switch to root only for the packaging step. |
| with open(os.path.join(tmpdir,"go.py"),"w") as F: |
| print(""" |
| import subprocess,os; |
| def to_user(): |
| os.setgid({gid:d}); |
| os.setuid({uid:d}); |
| subprocess.check_call(["debian/rules","debian/rules","build"], |
| preexec_fn=to_user); |
| subprocess.check_call(["debian/rules","debian/rules","binary"]); |
| """.format(uid=os.getuid(), |
| gid=os.getgid()), file=F); |
| |
| if args.run_shell: |
| opts.append("-ti"); |
| opts.append(env.image_name()); |
| |
| if args.run_shell: |
| opts.append("/bin/bash"); |
| else: |
| opts.extend(["python3",os.path.join(home,"go.py")]); |
| |
| docker_cmd(args,*opts); |
| |
| print() |
| for I in os.listdir(tmpdir): |
| if I.endswith(".deb"): |
| print("Final DEB: ",os.path.join("..",I)); |
| shutil.move(os.path.join(tmpdir,I), |
| os.path.join("..",I)); |
| |
| def copy_abi_files(src): |
| """Retrieve the current ABI files and place them in the source tree.""" |
| if not os.path.isdir(src): |
| return; |
| |
| for path,jnk,files in os.walk(src): |
| for I in files: |
| if not I.startswith("current-"): |
| continue; |
| |
| ref_fn = os.path.join("ABI",I[8:]); |
            cur_fn = os.path.join(path, I);
| |
| if os.path.isfile(ref_fn) and filecmp.cmp(ref_fn,cur_fn,False): |
| continue; |
| |
| print("Changed ABI File: ", ref_fn); |
| shutil.copy(cur_fn, ref_fn); |
| |
| def run_azp_build(args,env): |
| # Load the commands from the pipelines file |
| with open("buildlib/azure-pipelines.yml") as F: |
| azp = yaml.safe_load(F); |
| for bst in azp["stages"]: |
| if bst["stage"] == "Build": |
| break; |
| else: |
| raise ValueError("No Build stage found"); |
| for job in bst["jobs"]: |
| if job["job"] == "Compile": |
| break; |
| else: |
| raise ValueError("No Compile job found"); |
| |
| script = ["#!/bin/bash"] |
| workdir = "/__w/1" |
| srcdir = os.path.join(workdir,"s"); |
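    # /__w/1/s mirrors the path where the Azure Pipelines agent checks out
    # sources, so yml steps that assume that layout run unmodified.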
| for I in job["steps"]: |
| script.append("echo ==================================="); |
| script.append("echo %s"%(I["displayName"])); |
| script.append("cd %s"%(srcdir)); |
| if "bash" in I: |
| script.append(I["bash"]); |
| elif I.get("task") == "PythonScript@0": |
| script.append("set -e"); |
| if "workingDirectory" in I["inputs"]: |
| script.append("cd %s"%(os.path.join(srcdir,I["inputs"]["workingDirectory"]))); |
| script.append("%s %s %s"%(I["inputs"]["pythonInterpreter"], |
| os.path.join(srcdir,I["inputs"]["scriptPath"]), |
| I["inputs"].get("arguments",""))); |
| else: |
| raise ValueError("Unknown stanza %r"%(I)); |
| |
| with private_tmp(args) as tmpdir: |
| os.mkdir(os.path.join(tmpdir,"s")); |
| os.mkdir(os.path.join(tmpdir,"tmp")); |
| |
| opwd = os.getcwd(); |
| with inDirectory(os.path.join(tmpdir,"s")): |
| subprocess.check_call(["git", |
| "--git-dir",os.path.join(opwd,".git"), |
| "reset","--hard","HEAD"]); |
| subprocess.check_call(["git", |
| "--git-dir",os.path.join(opwd,".git"), |
| "fetch", |
| "--no-tags", |
| "https://github.com/linux-rdma/rdma-core.git","HEAD", |
| "master"]); |
| base = subprocess.check_output(["git", |
| "--git-dir",os.path.join(opwd,".git"), |
| "merge-base", |
| "HEAD","FETCH_HEAD"]).decode().strip(); |
| |
| opts = [ |
| "run", |
| "--read-only", |
| "--rm=true", |
| "-v","%s:%s"%(tmpdir, workdir), |
| "-w",srcdir, |
| "-u",str(os.getuid()), |
| "-e","SYSTEM_PULLREQUEST_SOURCECOMMITID=HEAD", |
            # azp puts the branch name 'master' here; we need to put a commit ID.
| "-e","SYSTEM_PULLREQUEST_TARGETBRANCH=%s"%(base), |
| "-e","HOME=%s"%(workdir), |
| "-e","TMPDIR=%s"%(os.path.join(workdir,"tmp")), |
| ] + map_git_args(opwd,srcdir); |
| |
| if args.run_shell: |
| opts.append("-ti"); |
| opts.append(env.image_name()); |
| |
| with open(os.path.join(tmpdir,"go.sh"),"w") as F: |
| F.write("\n".join(script)) |
| |
| if args.run_shell: |
| opts.append("/bin/bash"); |
| else: |
| opts.extend(["/bin/bash",os.path.join(workdir,"go.sh")]); |
| |
| try: |
| docker_cmd(args,*opts); |
| except subprocess.CalledProcessError as e: |
| copy_abi_files(os.path.join(tmpdir, "s/ABI")); |
| raise; |
| copy_abi_files(os.path.join(tmpdir, "s/ABI")); |
| |
| def args_pkg(parser): |
| parser.add_argument("ENV",action=ToEnvActionPkg,choices=env_choices_pkg()); |
| parser.add_argument("--run-shell",default=False,action="store_true", |
| help="Instead of running the build, enter a shell"); |
| parser.add_argument("--use-prebuilt-pandoc",default=False,action="store_true", |
| help="Do not rebuild the pandoc cache in build-azure_pipelines/pandoc-prebuilt/"); |
| parser.add_argument("--with", default=[],action="append", dest="with_flags", |
| help="Enable specified feature in RPM builds"); |
| parser.add_argument("--without", default=[],action="append", dest="without_flags", |
| help="Disable specified feature in RPM builds"); |
| def cmd_pkg(args): |
| """Build a package in the given environment.""" |
| for env in args.ENV: |
| if env.name == "azure_pipelines": |
| run_azp_build(args,env); |
| elif getattr(env,"is_deb",False): |
| run_deb_build(args,env); |
| elif getattr(env,"is_rpm",False): |
| run_rpm_build(args, |
| getattr(env,"specfile","%s.spec"%(project)), |
| env); |
| else: |
| print("%s does not support packaging"%(env.name)); |
| |
| # ------------------------------------------------------------------------- |
| |
| def args_make(parser): |
| parser.add_argument("--run-shell",default=False,action="store_true", |
| help="Instead of running the build, enter a shell"); |
| parser.add_argument("ENV",action=ToEnvAction,choices=env_choices()); |
| parser.add_argument('ARGS', nargs=argparse.REMAINDER); |
| def cmd_make(args): |
| """Run cmake and ninja within a docker container. If cmake has not yet been |
| run then this runs it with the given environment variables, then invokes ninja. |
| Otherwise ninja is invoked without calling cmake.""" |
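    # Trailing ARGS are split by shape: "-D..." becomes a cmake define,
    # "VAR=val" becomes a container environment variable, and anything else
    # is passed to ninja/make. Hypothetical invocation:
    #   $ buildlib/cbuild make fedora -DNO_PYVERBS=1 V=1 install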
| SRC = os.getcwd(); |
| |
| for env in args.ENV: |
| BUILD = "build-%s"%(env.name) |
| if not os.path.exists(BUILD): |
| os.mkdir(BUILD); |
| |
| home = os.path.join(os.path.sep,"home",os.getenv("LOGNAME")); |
| |
| dirs = [os.getcwd(),"/tmp"]; |
| # Import the symlink target too if BUILD is a symlink |
| BUILD_r = os.path.realpath(BUILD); |
| if not BUILD_r.startswith(os.path.realpath(SRC)): |
| dirs.append(BUILD_r); |
| |
| cmake_args = [] |
| if not env.build_pyverbs: |
| cmake_args.extend(["-DNO_PYVERBS=1"]); |
| |
| cmake_envs = [] |
| ninja_args = [] |
| for I in args.ARGS: |
| if I.startswith("-D"): |
| cmake_args.append(I); |
| elif I.find('=') != -1: |
| cmake_envs.append(I); |
| else: |
| ninja_args.append(I); |
| if env.use_make: |
| need_cmake = not os.path.exists(os.path.join(BUILD_r,"Makefile")); |
| else: |
| need_cmake = not os.path.exists(os.path.join(BUILD_r,"build.ninja")); |
| opts = ["run", |
| "--read-only", |
| "--rm=true", |
| "-ti", |
| "-u",str(os.getuid()), |
| "-e","HOME=%s"%(home), |
| "-w",BUILD_r, |
| ]; |
| opts.extend(env.docker_opts) |
| for I in dirs: |
| opts.append("-v"); |
| opts.append("%s:%s"%(I,I)); |
| for I in cmake_envs: |
| opts.append("-e"); |
| opts.append(I); |
        opts.append(env.image_name());
| |
| if args.run_shell: |
| os.execlp("sudo","sudo","docker",*(opts + ["/bin/bash"])); |
| |
| if need_cmake: |
| if env.use_make: |
| prog_args = ["cmake",SRC] + cmake_args; |
| else: |
| prog_args = ["cmake","-GNinja",SRC] + cmake_args; |
| docker_cmd(args,*(opts + prog_args)); |
| |
| if env.use_make: |
| prog_args = ["make","-C",BUILD_r] + ninja_args; |
| else: |
| prog_args = [getattr(env,"ninja_cmd","ninja"), |
| "-C",BUILD_r] + ninja_args; |
| |
| if len(args.ENV) <= 1: |
| os.execlp("sudo","sudo","docker",*(opts + prog_args)); |
| else: |
| docker_cmd(args,*(opts + prog_args)); |
| |
| # ------------------------------------------------------------------------- |
| |
| def get_build_args(args,env): |
| """Return extra docker arguments for building. This is the system APT proxy.""" |
| res = []; |
| if args.pull: |
| res.append("--pull"); |
| |
| if env.proxy and os.path.exists("/etc/apt/apt.conf.d/01proxy"): |
| # The line in this file must be 'Acquire::http { Proxy "http://xxxx:3142"; };' |
| with open("/etc/apt/apt.conf.d/01proxy") as F: |
| proxy = F.read().strip().split('"')[1]; |
| res.append("--build-arg"); |
| res.append('http_proxy=%s'%(proxy)); |
| return res; |
| |
| def args_build_images(parser): |
| parser.add_argument("ENV",nargs="+",action=ToEnvAction,choices=env_choices()); |
| parser.add_argument("--no-pull",default=True,action="store_false", |
| dest="pull", |
| help="Instead of running the build, enter a shell"); |
| def cmd_build_images(args): |
| """Run from the top level source directory to make the docker images that are |
| needed for building. This only needs to be run once.""" |
| # Docker copies the permissions from the local host and we need this umask |
| # to be 022 or the container breaks |
| os.umask(0o22) |
| for env in args.ENV: |
| with private_tmp(args) as tmpdir: |
| df = env.get_docker_file(tmpdir); |
| fn = os.path.join(tmpdir,"Dockerfile"); |
| with open(fn,"wt") as F: |
| for ln in df.lines: |
| print(ln, file=F); |
| opts = (["build"] + |
| get_build_args(args,env) + |
| env.docker_opts + |
| ["-f",fn, |
| "-t",env.image_name(), |
| tmpdir]); |
| print(opts) |
| docker_cmd(args,*opts); |
| |
| # ------------------------------------------------------------------------- |
| |
def args_push_azp_images(parser):
    pass
| def cmd_push_azp_images(args): |
| """Push the images required for Azure Pipelines to the container |
| registry. Must have done 'az login' first""" |
| subprocess.check_call(["sudo","az","acr","login","--name","ucfconsort"]); |
| with private_tmp(args) as tmpdir: |
| nfn = os.path.join(tmpdir,"build.ninja"); |
| with open(nfn,"w") as F: |
| F.write("""rule push |
| command = docker push $img |
| description=Push $img\n"""); |
| |
| for env in environments: |
| name = env.image_name() |
| if "ucfconsort.azurecr.io" not in name: |
| continue |
| F.write("build push_%s : push\n img = %s\n"%(env.name,env.image_name())); |
| F.write("default push_%s\n"%(env.name)); |
| subprocess.check_call(["sudo","ninja"],cwd=tmpdir); |
| |
| # ------------------------------------------------------------------------- |
| def args_make_dist_tar(parser): |
| parser.add_argument("BUILD",help="Path to the build directory") |
| parser.add_argument("--tarfn",help="Output TAR filename") |
| parser.add_argument("--tag",help="git tag to sanity check against") |
| def cmd_make_dist_tar(args): |
| """Make the standard distribution tar. The BUILD argument must point to a build |
| output directory that has pandoc-prebuilt""" |
| ver = get_version(); |
| |
| if not args.tarfn: |
| args.tarfn = "%s-%s.tar.gz"%(project,ver) |
| |
| # The tag name and the cmake file must match. |
| if args.tag: |
| assert args.tag == "v" + ver; |
| |
| os.umask(0o22) |
| with private_tmp(args) as tmpdir: |
| tmp_tarfn = os.path.join(tmpdir,"tmp.tar"); |
| |
| prefix = "%s-%s/"%(project,get_version()); |
| subprocess.check_call(["git","archive", |
| "--prefix",prefix, |
| "--output",tmp_tarfn, |
| "HEAD"]); |
| |
| # Mangle the paths and append the prebuilt stuff to the tar file |
| if args.BUILD: |
| subprocess.check_call([ |
| "tar", |
| "-C",os.path.join(args.script_pwd,args.BUILD,"pandoc-prebuilt"), |
| "-rf",tmp_tarfn, |
| "./", |
| "--xform",r"s|^\.|%sbuildlib/pandoc-prebuilt|g"%(prefix)]); |
| |
| assert args.tarfn.endswith(".gz") or args.tarfn.endswith(".tgz"); |
| with open(os.path.join(args.script_pwd,args.tarfn),"w") as F: |
| subprocess.check_call(["gzip","-9c",tmp_tarfn],stdout=F); |
| |
| # ------------------------------------------------------------------------- |
| |
| if __name__ == '__main__': |
| parser = argparse.ArgumentParser(description='Operate docker for building this package') |
| subparsers = parser.add_subparsers(title="Sub Commands",dest="command"); |
| subparsers.required = True; |
| |
| funcs = globals(); |
| for k,v in list(funcs.items()): |
| if k.startswith("cmd_") and inspect.isfunction(v): |
| sparser = subparsers.add_parser(k[4:].replace('_','-'), |
| help=v.__doc__); |
| funcs["args_" + k[4:]](sparser); |
| sparser.set_defaults(func=v); |
| |
| try: |
| import argcomplete; |
| argcomplete.autocomplete(parser); |
| except ImportError: |
| pass; |
| |
| args = parser.parse_args(); |
| args.sudo = True; |
| |
| # This script must always run from the top of the git tree, and a git |
| # checkout is mandatory. |
| git_top = subprocess.check_output(["git","rev-parse","--show-toplevel"]).strip(); |
| args.script_pwd = os.getcwd(); |
| os.chdir(git_top); |
| |
| args.func(args); |