Run readelf -d on _pyflamegpu.so to check for differences between ubu… #1710
Workflow file for this run

# Build manylinux wheels, and upload them to the action for testing within a short time frame
name: Manylinux2014
# Run on branch push events (i.e. not tag pushes) and on pull requests
on:
# Branch pushes that do not only modify other workflow files
push:
branches:
- '**'
paths:
- "**"
- "!.github/**"
- ".github/scripts/install_cuda_centos.sh"
- ".github/workflows/Manylinux2014.yml"
# Disabled for now. See https://github.com/FLAMEGPU/FLAMEGPU2/pull/644
# pull_request:
# Allow manual invocation.
workflow_dispatch:
defaults:
run:
shell: bash
# A single job which builds manylinux2014 wheels. The manylinux2014 image ships with GCC 10.2.1 at the time of writing; if this bumps to an unpatched 10.3 we might have issues with CUDA.
jobs:
build:
runs-on: ${{ matrix.cudacxx.os }}
# Run steps inside a manylinux container.
container: quay.io/pypa/manylinux2014_x86_64
strategy:
fail-fast: false
# Multiplicative build matrix
# optional exclude: can be partial, include: must be specific
matrix:
cudacxx:
- cuda: "12.0"
cuda_arch: "50"
hostcxx: devtoolset-10
os: ubuntu-20.04
- cuda: "11.2"
cuda_arch: "35"
hostcxx: devtoolset-9
os: ubuntu-20.04
python:
- "3.12"
config:
- name: "Release"
config: "Release"
SEATBELTS: "ON"
VISUALISATION:
- "ON"
- "OFF"
# Name the job based on matrix/env options
name: "build (${{ matrix.cudacxx.cuda }}, ${{matrix.python}}, ${{ matrix.VISUALISATION }}, ${{ matrix.config.name }}, ${{ matrix.cudacxx.os }})"
env:
# Control whether the wheel should be repaired by auditwheel. This will fail until the .so dependencies are addressed
AUDITWHEEL_REPAIR: "OFF"
MANYLINUX: "manylinux2014"
ARCH: "x86_64"
# Control if static GLEW should be built and used or not.
USE_STATIC_GLEW: "ON"
# Compute the artifact name, which must be unique per build matrix/job combination
ARTIFACT_NAME: wheel-manylinux2014-${{ matrix.cudacxx.cuda }}-${{matrix.python}}-${{ matrix.VISUALISATION }}-${{ matrix.config.name }}-${{ matrix.cudacxx.os }}
# Define constants
BUILD_DIR: "build"
FLAMEGPU_BUILD_TESTS: "OFF"
# Conditional based on the matrix, via an awkward almost-ternary; see the worked example in the comment below
FLAMEGPU_BUILD_PYTHON: ${{ fromJSON('{true:"ON",false:"OFF"}')[matrix.python != ''] }}
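# e.g. matrix.python = "3.12" makes (matrix.python != '') true, so the lookup yields "ON";
# with no python entry in the matrix it would yield "OFF".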
# Port matrix options to environment, for more portability.
CUDA: ${{ matrix.cudacxx.cuda }}
CUDA_ARCH: ${{ matrix.cudacxx.cuda_arch }}
HOSTCXX: ${{ matrix.cudacxx.hostcxx }}
OS: ${{ matrix.cudacxx.os }}
CONFIG: ${{ matrix.config.config }}
FLAMEGPU_SEATBELTS: ${{ matrix.config.SEATBELTS }}
PYTHON: ${{ matrix.python}}
VISUALISATION: ${{ matrix.VISUALISATION }}
steps:
- uses: actions/checkout@v3
# Downgrade the devtoolset in the image based on the build matrix, using:
# gcc-10 for CUDA >= 11.2. Unclear if devtoolset-10 will upgrade to unpatched 11.3 which breaks CUDA builds that use <chrono>.
# gcc-9 for CUDA >= 11.0
# These are not the toolsets officially supported by CUDA on CentOS, but they are what works.
- name: Install RHEL devtoolset (CentOS)
if: ${{ startsWith(env.HOSTCXX, 'devtoolset-') }}
run: |
# Install devtoolset-X
yum install -y ${{ env.HOSTCXX }}
# Enable the toolset via source rather than scl enable, which does not play nicely with multi-step GitHub Actions jobs
source /opt/rh/${{ env.HOSTCXX }}/enable
# Export the new environment / compilers for subsequent steps.
echo "PATH=${PATH}" >> $GITHUB_ENV
echo "CC=$(which gcc)" >> $GITHUB_ENV
echo "CXX=$(which g++)" >> $GITHUB_ENV
echo "CUDAHOSTCXX=$(which g++)" >> $GITHUB_ENV
- name: Install CUDA (CentOS)
if: ${{ env.CUDA != '' }}
env:
cuda: ${{ env.CUDA }}
run: .github/scripts/install_cuda_centos.sh
- name: Install Visualisation Dependencies (CentOS)
if: ${{ env.VISUALISATION == 'ON' }}
run: |
yum install -y glew-devel fontconfig-devel SDL2-devel freetype-devel
# Build/Install DevIL from source.
yum install -y freeglut-devel
git clone --depth 1 https://github.com/DentonW/DevIL.git
cd DevIL/DevIL
mkdir -p build
cd build
cmake .. -DCMAKE_BUILD_TYPE=${{ env.CONFIG }} -Wno-error=dev
make -j `nproc`
make install
- name: Build and install GLEW including static GLEW
if: ${{ env.VISUALISATION == 'ON' && env.USE_STATIC_GLEW == 'ON' }}
env:
GLEW_VERSION: "2.2.0"
run: |
yum install -y wget
wget https://github.com/nigels-com/glew/releases/download/glew-${{ env.GLEW_VERSION }}/glew-${{ env.GLEW_VERSION }}.tgz
tar -zxf glew-${{ env.GLEW_VERSION }}.tgz
cd glew-${{ env.GLEW_VERSION }}
make
make install
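# No install prefix is passed here: GLEW's Makefile installs under GLEW_DEST, which is assumed
# to default to /usr for the 2.2.0 release, putting the static libGLEW where CMake's FindGLEW
# will pick it up.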
- name: Add custom problem matchers for annotations
run: echo "::add-matcher::.github/problem-matchers.json"
# This works around an issue where manylinux builds do not generate the build number, because the git directory inside the container is owned by a different user
- name: Enable git safe-directory
run: git config --global --add safe.directory $GITHUB_WORKSPACE
- name: Configure cmake
run: >
cmake . -B "${{ env.BUILD_DIR }}"
-DCMAKE_BUILD_TYPE="${{ env.CONFIG }}"
-Werror=dev
-DCMAKE_WARN_DEPRECATED="OFF"
-DFLAMEGPU_WARNINGS_AS_ERRORS="ON"
-DCMAKE_CUDA_ARCHITECTURES="${{ env.CUDA_ARCH }}"
-DFLAMEGPU_BUILD_TESTS="${{ env.FLAMEGPU_BUILD_TESTS }}"
-DFLAMEGPU_BUILD_PYTHON="${{ env.FLAMEGPU_BUILD_PYTHON }}"
-DPYTHON3_EXACT_VERSION="${{ env.PYTHON }}"
-DFLAMEGPU_VISUALISATION="${{ env.VISUALISATION }}"
-DFLAMEGPU_ENABLE_NVTX="ON"
-DGLEW_USE_STATIC_LIBS="${{ env.USE_STATIC_GLEW }}"
-DOpenGL_GL_PREFERENCE:STRING=LEGACY
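# GLEW_USE_STATIC_LIBS is a hint to CMake's FindGLEW module to prefer the static library built
# above, and OpenGL_GL_PREFERENCE=LEGACY makes FindOpenGL link against the legacy libGL rather
# than the GLVND dispatch libraries.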
- name: Build python wheel
if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }}
working-directory: ${{ env.BUILD_DIR }}
run: cmake --build . --target pyflamegpu --verbose -j `nproc`
- name: readelf
if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }}
working-directory: ${{ env.BUILD_DIR }}
run: find . -name "_pyflamegpu.so" | head -n 1 | xargs readelf -d
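# readelf -d prints the .dynamic section of the shared object: NEEDED entries (the runtime
# library dependencies), SONAME, and any RPATH/RUNPATH. This is useful for comparing dynamic
# linkage between builds and for understanding auditwheel's decisions below.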
# Run auditwheel show for information, but do not repair.
- name: Run auditwheel show
working-directory: ${{ env.BUILD_DIR }}
run: auditwheel show lib/${{ env.CONFIG }}/python/dist/*whl
# Ideally we would use auditwheel repair to check/enforce manylinux conformity,
# but we cannot, due to CUDA shared object (libcuda.so.1) dependencies which we cannot/should not/will not package into the wheel.
- name: Run auditwheel repair
if: ${{ env.AUDITWHEEL_REPAIR == 'ON' }}
working-directory: ${{ env.BUILD_DIR }}
run: auditwheel repair --plat ${{ env.MANYLINUX }}_${{ env.ARCH }} lib/${{ env.CONFIG }}/python/dist/*whl -w lib/${{ env.CONFIG }}/python/dist
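# A possible alternative, not used here: recent auditwheel releases accept an --exclude flag, e.g.
#   auditwheel repair --exclude libcuda.so.1 --plat <platform tag> <wheel> -w <out dir>
# which grafts every dependency except the excluded SONAME. This assumes the auditwheel shipped
# in the manylinux2014 image is new enough to support --exclude.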
# Upload wheel artifacts to the GitHub Actions run, with a short retention period
# Use a unique name per matrix job to avoid the risk of corruption mentioned in the docs (although it should also work with unique filenames)
- name: Upload Wheel Artifacts
if: ${{ env.FLAMEGPU_BUILD_PYTHON == 'ON' }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.ARTIFACT_NAME }}
path: ${{ env.BUILD_DIR }}/lib/${{ env.CONFIG }}/python/dist/*.whl
if-no-files-found: error
retention-days: 5