From 8b985617c7d60432d61e2f8bdfd20fea084bf316 Mon Sep 17 00:00:00 2001 From: Thomas-Otavio Peulen Date: Fri, 16 Feb 2024 12:24:56 +0100 Subject: [PATCH] remove tools submodule --- .gitmodules | 3 - tools | 1 - tools/.condarc | 5 + tools/README.md | 6 + tools/build.bat | 2 + tools/build.sh | 3 + tools/conda_build_config.yaml | 856 ++++++++++++++++++++++++++++++++++ tools/deploy.sh | 4 + tools/doxy2swig.py | 838 +++++++++++++++++++++++++++++++++ tools/install_macos_sdk.sh | 33 ++ tools/setup.bat | 3 + tools/setup.sh | 3 + 12 files changed, 1753 insertions(+), 4 deletions(-) delete mode 100644 .gitmodules delete mode 160000 tools create mode 100644 tools/.condarc create mode 100644 tools/README.md create mode 100644 tools/build.bat create mode 100755 tools/build.sh create mode 100644 tools/conda_build_config.yaml create mode 100755 tools/deploy.sh create mode 100644 tools/doxy2swig.py create mode 100755 tools/install_macos_sdk.sh create mode 100644 tools/setup.bat create mode 100755 tools/setup.sh diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index 531336f..0000000 --- a/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "tools"] - path = tools - url = https://gitlab.peulen.xyz/build/tools diff --git a/tools b/tools deleted file mode 160000 index b4b69d4..0000000 --- a/tools +++ /dev/null @@ -1 +0,0 @@ -Subproject commit b4b69d402c6e449559cce7a7520f42e08a6452fc diff --git a/tools/.condarc b/tools/.condarc new file mode 100644 index 0000000..036ce82 --- /dev/null +++ b/tools/.condarc @@ -0,0 +1,5 @@ +channels: + - conda-forge + - conda-forge/label/cf201901 + - tpeulen +channel_priority: strict diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 0000000..d07e850 --- /dev/null +++ b/tools/README.md @@ -0,0 +1,6 @@ +# conda_config + +Conda build config files used in skd and chisurf + +These scripts/settings are meant to +be used with mambaforge docker. 
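+
+## Typical usage (assumed workflow; adjust to your CI layout)
+
+From the repository root, inside a mambaforge-based container, the scripts
+chain as follows (`deploy.sh` expects `ANACONDA_API_TOKEN`, `CONDA_USER` and
+`CI_COMMIT_REF_NAME` in the environment):
+
+```bash
+./tools/build.sh    # runs tools/setup.sh, installs boa, then: conda mambabuild conda-recipe --output-folder bld-dir
+./tools/deploy.sh   # uploads bld-dir/**/*.tar.bz2 with label "main" on master, "nightly" otherwise
+```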
\ No newline at end of file diff --git a/tools/build.bat b/tools/build.bat new file mode 100644 index 0000000..4f775b6 --- /dev/null +++ b/tools/build.bat @@ -0,0 +1,2 @@ +call tools\setup.bat +conda mambabuild conda-recipe --output-folder bld-dir diff --git a/tools/build.sh b/tools/build.sh new file mode 100755 index 0000000..3908b5b --- /dev/null +++ b/tools/build.sh @@ -0,0 +1,3 @@ +./tools/setup.sh +mamba install -y boa +conda mambabuild conda-recipe --output-folder bld-dir diff --git a/tools/conda_build_config.yaml b/tools/conda_build_config.yaml new file mode 100644 index 0000000..9682a12 --- /dev/null +++ b/tools/conda_build_config.yaml @@ -0,0 +1,856 @@ +CONDA_BUILD_SYSROOT: + - /opt/MacOSX10.13.sdk # [osx] +c_compiler: + - gcc # [linux] + - clang # [osx] + - vs2019 # [win and x86_64] + - vs2022 # [win and arm64] +c_compiler_version: # [unix] + - 12 # [linux] + - 16 # [osx] + - 10 # [os.environ.get("CF_CUDA_ENABLED", "False") == "True" and linux] + - 11 # [os.environ.get("CF_CUDA_ENABLED", "False") == "True" and linux] +cxx_compiler: + - gxx # [linux] + - clangxx # [osx] + - vs2019 # [win and x86_64] + - vs2022 # [win and arm64] +cxx_compiler_version: # [unix] + - 12 # [linux] + - 16 # [osx] + - 10 # [os.environ.get("CF_CUDA_ENABLED", "False") == "True" and linux] + - 11 # [os.environ.get("CF_CUDA_ENABLED", "False") == "True" and linux] +llvm_openmp: # [osx] + - 16 # [osx] +fortran_compiler: # [unix or win64] + - gfortran # [linux64 or (osx and x86_64)] + - gfortran # [aarch64 or ppc64le or armv7l or s390x] + - flang # [win64] +fortran_compiler_version: # [unix or win64] + - 12 # [linux] + - 12 # [osx] + - 5 # [win64] + - 10 # [os.environ.get("CF_CUDA_ENABLED", "False") == "True" and linux] + - 11 # [os.environ.get("CF_CUDA_ENABLED", "False") == "True" and linux] +m2w64_c_compiler: # [win] + - m2w64-toolchain # [win] +m2w64_cxx_compiler: # [win] + - m2w64-toolchain # [win] +m2w64_fortran_compiler: # [win] + - m2w64-toolchain # [win] + +cuda_compiler: + - None + - nvcc # [(linux or win64) and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - nvcc # [(linux or win64) and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] +cuda_compiler_version: + - None + - 11.2 # [(linux or win64) and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - 11.8 # [(linux or win64) and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] +cuda_compiler_version_min: + - None # [osx] + - 11.2 # [linux or win64] + +arm_variant_type: # [aarch64] + - sbsa # [aarch64] + +_libgcc_mutex: + - 0.1 conda_forge +# +# Go Compiler Options +# + +# The basic go-compiler with CGO disabled, +# It generates fat binaries without libc dependencies +# The activation scripts will set your CC,CXX and related flags +# to invalid values. +go_compiler: + - go-nocgo +# The go compiler build with CGO enabled. +# It can generate fat binaries that depend on conda's libc. +# You should use this compiler if the underlying +# program needs to link against other C libraries, in which +# case make sure to add 'c,cpp,fortran_compiler' for unix +# and the m2w64 equivalent for windows. +cgo_compiler: + - go-cgo +# The following are helpful variables to simplify go meta.yaml files. 
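+#
+# (For reference — an assumed recipe-side usage, not defined in this file:
+# conda-build's compiler() jinja helper resolves compiler('go') through the
+# go_compiler key above and compiler('cgo') through cgo_compiler, so a
+# CGO-enabled recipe would request roughly
+#   requirements:
+#     build:
+#       - {{ compiler('cgo') }}
+#       - {{ compiler('c') }}
+# in its meta.yaml.)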
+target_goos: + - linux # [linux] + - darwin # [osx] + - windows # [win] +target_goarch: + - amd64 # [x86_64] + - arm64 # [arm64 or aarch64] + - ppc64le # [ppc64le] +target_goexe: + - # [unix] + - .exe # [win] +target_gobin: + - '${PREFIX}/bin/' # [unix] + - '%PREFIX%\bin\' # [win] + +# Rust Compiler Options +rust_compiler: + - rust + +macos_machine: # [osx] + - x86_64-apple-darwin13.4.0 # [osx and x86_64] + - arm64-apple-darwin20.0.0 # [osx and arm64] +MACOSX_DEPLOYMENT_TARGET: # [osx] + - 11.0 # [osx and arm64] + - 10.9 # [osx and x86_64] +VERBOSE_AT: + - V=1 +VERBOSE_CM: + - VERBOSE=1 + +# dual build configuration +channel_sources: + - conda-forge # [not s390x] + - https://conda-web.anaconda.org/conda-forge # [s390x] + +channel_targets: + - conda-forge main + +cdt_name: # [linux] + - cos6 # [linux64 and os.environ.get("DEFAULT_LINUX_VERSION", "cos6") == "cos6"] + - cos7 # [linux64 and os.environ.get("DEFAULT_LINUX_VERSION", "cos6") == "cos7"] + - cos7 # [linux and aarch64] + - cos7 # [linux and ppc64le] + - cos7 # [linux and armv7l] + - cos7 # [linux and s390x] + + - cos7 # [linux and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - cos7 # [linux and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + +docker_image: # [os.environ.get("BUILD_PLATFORM", "").startswith("linux-")] + # Native builds + - quay.io/condaforge/linux-anvil-cos7-x86_64 # [os.environ.get("BUILD_PLATFORM") == "linux-64"] + - quay.io/condaforge/linux-anvil-aarch64 # [os.environ.get("BUILD_PLATFORM") == "linux-aarch64"] + - quay.io/condaforge/linux-anvil-ppc64le # [os.environ.get("BUILD_PLATFORM") == "linux-ppc64le"] + - quay.io/condaforge/linux-anvil-armv7l # [os.environ.get("BUILD_PLATFORM") == "linux-armv7l"] + + # CUDA 11.2 + - quay.io/condaforge/linux-anvil-cuda:11.2 # [linux64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-64"] + # CUDA 11.2 arch: native compilation (build == target) + - quay.io/condaforge/linux-anvil-ppc64le-cuda:11.2 # [ppc64le and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-ppc64le"] + - quay.io/condaforge/linux-anvil-aarch64-cuda:11.2 # [aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-aarch64"] + # CUDA 11.2 arch: cross-compilation (build != target) + - quay.io/condaforge/linux-anvil-cuda:11.2 # [ppc64le and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-64"] + - quay.io/condaforge/linux-anvil-cuda:11.2 # [aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-64"] + + # CUDA 11.8 + - quay.io/condaforge/linux-anvil-cuda:11.8 # [linux64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-64"] + # CUDA 11.8 arch: native compilation (build == target) + - quay.io/condaforge/linux-anvil-ppc64le-cuda:11.8 # [ppc64le and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-ppc64le"] + - quay.io/condaforge/linux-anvil-aarch64-cuda:11.8 # [aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-aarch64"] + # CUDA 11.8 arch: cross-compilation (build != target) + - quay.io/condaforge/linux-anvil-cuda:11.8 # [ppc64le and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-64"] + - 
quay.io/condaforge/linux-anvil-cuda:11.8 # [aarch64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM") == "linux-64"] + +zip_keys: + - # [unix] + - c_compiler_version # [unix] + - cxx_compiler_version # [unix] + - fortran_compiler_version # [unix] + - cuda_compiler # [linux and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - cuda_compiler_version # [linux and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - cdt_name # [linux and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - docker_image # [linux and os.environ.get("CF_CUDA_ENABLED", "False") == "True" and os.environ.get("BUILD_PLATFORM", "").startswith("linux-")] + - # [win64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - cuda_compiler # [win64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - cuda_compiler_version # [win64 and os.environ.get("CF_CUDA_ENABLED", "False") == "True"] + - + - python + - numpy + - python_impl + # transition until arrow_cpp can be dropped for arrow 13.x + - + - arrow_cpp + - libarrow + - libarrow_all + # as of 4.23.x, libprotobuf requires patch-level run-exports; + # we couple it with grpc (which very roughly releases in sync) + # to reduce the migration pain for these two libs a bit. + - + - libgrpc + - libprotobuf + + +# aarch64 specifics because conda-build sets many things to centos 6 +# this can probably be removed when conda-build gets updated defaults +# for aarch64 +cdt_arch: aarch64 # [aarch64] +BUILD: aarch64-conda_cos7-linux-gnu # [aarch64] + +# armv7l specifics because conda-build sets many things to centos 6 +# this can probably be removed when conda-build gets updated defaults +# for aarch64 +cdt_arch: armv7l # [armv7l] +BUILD: armv7-conda_cos7-linux-gnueabihf # [armv7l] + +pin_run_as_build: + # TODO: add run_exports to the following feedstocks + flann: + max_pin: x.x.x + graphviz: + max_pin: x + libsvm: + max_pin: x + netcdf-cxx4: + max_pin: x.x + occt: + max_pin: x.x + poppler: + max_pin: x.x + r-base: + max_pin: x.x + min_pin: x.x + vlfeat: + max_pin: x.x.x + +# Pinning packages + +# blas +libblas: + - 3.9 *netlib +libcblas: + - 3.9 *netlib +liblapack: + - 3.9 *netlib +liblapacke: + - 3.9 *netlib +blas_impl: + - openblas + - mkl # [x86 or x86_64] + - blis # [x86 or x86_64] + +# this output was dropped as of libabseil 20230125 +abseil_cpp: + - '20220623.0' +alsa_lib: + - 1.2.10 +antic: + - 0.2 +aom: + - '3.7' +arb: + - '2.23' +arpack: + - '3.8' +# keep in sync with libarrow +arrow_cpp: + - 14 # does not exist; switch to libarrow + - 13 + - 12 + - 11.0.0 +assimp: + - 5.3.1 +attr: + - 2.5 +aws_c_auth: + - 0.7.14 +aws_c_cal: + - 0.6.9 +aws_c_common: + - 0.9.12 +aws_c_compression: + - 0.2.17 +# coupled to aws_c_common version bump, see +# https://github.com/conda-forge/aws-c-http-feedstock/pull/109 +aws_c_event_stream: + - 0.4.1 +aws_c_http: + - 0.8.0 +# the builds got coupled because 0.2.4 landed before the this migrator finished +aws_c_io: + - 0.14.2 +# the builds got coupled because 0.2.4 landed before the io migrator +aws_c_mqtt: + - 0.10.1 +aws_c_s3: + - 0.5.0 +aws_c_sdkutils: + - 0.1.14 +aws_checksums: + - 0.1.17 +aws_crt_cpp: + - 0.26.1 +aws_sdk_cpp: + - 1.11.210 +bullet_cpp: + - 3.25 +bzip2: + - 1 +c_ares: + - 1 +cairo: + - 1 +capnproto: + - 0.10.2 +ccr: + - 1.3 +cfitsio: + - 4.3.0 +coin_or_cbc: + - 2.10 +coincbc: + - 2.10 +coin_or_cgl: + - 0.60 +coin_or_clp: + - 1.17 +coin_or_osi: + - 0.108 +coin_or_utils: + - 2.11 +console_bridge: + - 1.0 +cudnn: + - 8 +cutensor: + - 1 +curl: + - 8 +dav1d: + 
- 1.2.1 +davix: + - '0.8' +dbus: + - 1 +dcap: + - 2.47 +eclib: + - '20231211' +elfutils: + - 0.190 +exiv2: + - 0.27 +expat: + - 2 +ffmpeg: + - '6' +fftw: + - 3 +flann: + - 1.9.2 +flatbuffers: + - 23.5.26 +fmt: + - '9' +fontconfig: + - 2 +freetype: + - 2 +gct: + - 6.2.1629922860 +gf2x: + - '1.3' +gdk_pixbuf: + - 2 +gnuradio_core: + - 3.10.9 +gnutls: + - 3.7 +gsl: + - 2.7 +gsoap: + - 2.8.123 +gstreamer: + - '1.22' +gst_plugins_base: + - '1.22' +gdal: + - '3.8' +geos: + - 3.12.1 +geotiff: + - 1.7.1 +gfal2: + - '2.21' +gflags: + - 2.2 +giflib: + - 5.2 +glew: + - 2.1 +glib: + - '2' +glog: + - '0.6' +glpk: + - '5.0' +gmp: + - 6 +# keep google_cloud_cpp in sync with libgoogle_cloud +google_cloud_cpp: + - '2.12' +google_cloud_cpp_common: + - 0.25.0 +googleapis_cpp: + - '0.10' +graphviz: + - '9' +# this has been renamed to libgrpc as of 1.49; dropped as of 1.52. +# IOW, this version is unavailable; makes the renaming more obvious +grpc_cpp: + - '1.52' +harfbuzz: + - '8' +hdf4: + - 4.2.15 +hdf5: + - 1.14.3 +icu: + - '73' +imath: + - 3.1.9 +ipopt: + - 3.14.13 +isl: + - '0.26' +jasper: + - 4 +jpeg: + - 9 +lcms: + - 2 +lerc: + - '4' +libjpeg_turbo: + - '3' +libev: + - 4.33 +json_c: + - '0.17' +jsoncpp: + - 1.9.5 +kealib: + - '1.5' +krb5: + - '1.20' +ldas_tools_framecpp: + - '2.9' +libabseil: + - '20230802' +libabseil_static: + - '20220623.0' +libaec: + - '1' +libarchive: + - '3.7' +# keep in sync with arrow_cpp (libarrow exists only from 10.x, +# but make sure we have same length for zip as arrow_cpp) +libarrow: + - 14 + - 13 + - 12 + - 11.0.0 +# only exists as of arrow v14, but needs to have same length as libarrow +libarrow_all: + - 14 + - 13 + - 12 + - 11.0.0 +libavif: + - '1.0.1' +libblitz: + - 1.0.2 +libboost_devel: + - '1.82' +libboost_python_devel: + - '1.82' +libcint: + - '5.5' +libcurl: + - 8 +libcrc32c: + - 1.1 +libdap4: + - 3.20.6 +libdeflate: + - '1.19' +libduckdb_devel: + - '0.9.2' +libeantic: + - 1 +libevent: + - 2.1.12 +libexactreal: + - '4' +libffi: + - '3.4' +libflatsurf: + - 3 +libflint: + - '2.9' +libframel: + - '8.41' +libgdal: + - '3.8' +libgit2: + - '1.7' +# Keep in sync with google_cloud_cpp +libgoogle_cloud: + - '2.12' +libgrpc: + - '1.58' +libhugetlbfs: + - 2 +libhwloc: + - '2.9.1' +libhwy: + - '1.0' +libiconv: + - 1 +libidn2: + - 2 +libintervalxt: + - 3 +libkml: + - 1.3 +libiio: + - 0 +libmed: + - '4.1' +libmatio: + - 1.5.26 +libmatio_cpp: + - 0.2.3 +libmicrohttpd: + - 0.9 +libnetcdf: + - 4.9.2 +libopencv: + - 4.9.0 +libopentelemetry_cpp: + - '1.13' +libosqp: + - 0.6.3 +libopenvino_dev: + - '2023.3.0' +libpcap: + - '1.10' +libpng: + - 1.6 +libprotobuf: + - 4.24.3 +libpq: + - '16' +libraw: + - '0.21' +librdkafka: + - '2.2' +librsvg: + - 2 +libsecret: + - 0.18 +libsentencepiece: + - '0.1.99' +libsndfile: + - '1.2' +libsoup: + - 3 +libspatialindex: + - 1.9.3 +libssh: + - 0.10 +libssh2: + - 1 +libsvm: + - '332' +# keep libsqlite in sync with sqlite +libsqlite: + - 3 +libtensorflow: + - "2.15" +libtensorflow_cc: + - "2.15" +libthrift: + - 0.18.1 +libtiff: + - '4.6' +# keep in synch with pytorch +libtorch: + - '2.1' +libunwind: + - '1.6' +libv8: + - 8.9.83 +libvips: + - 8 +libwebp: + - 1 +libwebp_base: + - 1 +libxml2: + - 2 +libxsmm: + - 1 +libuuid: + - 2 +libzip: + - 1 +lmdb: + - 0.9.29 +log4cxx: + - 1.2.0 +lz4_c: + - '1.9.3' +lzo: + - 2 +metis: + - 5.1.0 +mimalloc: + - 2.1.2 +mkl: + - '2023' +mkl_devel: + - 2023 +mpg123: + - '1.32' +mpich: + - 4 +mpfr: + - 4 +msgpack_c: + - 6 +msgpack_cxx: + - 6 +mumps_mpi: + - 5.6.2 +mumps_seq: + - 5.6.2 +nccl: + - 2 +ncurses: + - 6 +netcdf_cxx4: 
+ - 4.3 +netcdf_fortran: + - '4.6' +nettle: + - '3.9' +nodejs: + - '20' + - '18' +nss: + - 3 +nspr: + - 4 +nlopt: + - '2.7' +ntl: + - '11.4.3' +# we build for the oldest version possible of numpy for forward compatibility +# we roughly follow NEP29 in choosing the oldest version +numpy: + # part of a zip_keys: python, python_impl, numpy + - 1.22 + - 1.22 + - 1.22 + - 1.23 +occt: + - 7.7.2 +openblas: + - 0.3.* +openexr: + - '3.2' +openh264: + - 2.4.0 +openjpeg: + - '2' +openmpi: + - 4 +openslide: + - 4 +openssl: + - '3' +orc: + - 1.9.2 +pango: + - 1.50 +pari: + - 2.15.* *_pthread +pcl: + - 1.13.1 +perl: + - 5.32.1 +petsc: + - '3.20' +petsc4py: + - '3.20' +pugixml: + - '1.13' +slepc: + - '3.20' +slepc4py: + - '3.20' +svt_av1: + - 1.8.0 +p11_kit: + - '0.24' +pcre: + - '8' +pcre2: + - '10.42' +pixman: + - 0 +poco: + - 1.12.4 +poppler: + - '23.07' +postgresql: + - '16' +postgresql_plpython: + - '16' +proj: + - 9.3.1 +pulseaudio: + - '16.1' +pulseaudio_client: + - '16.1' +pulseaudio_daemon: + - '16.1' +pybind11_abi: + - 4 +python: + # part of a zip_keys: python, python_impl, numpy + - 3.8.* *_cpython + - 3.9.* *_cpython + - 3.10.* *_cpython + - 3.11.* *_cpython +python_impl: + # part of a zip_keys: python, python_impl, numpy + - cpython + - cpython + - cpython + - cpython +# Keep in sync with libtorch +pytorch: + - '2.1' +pyqt: + - 5.15 +pyqtwebengine: + - 5.15 +pyqtchart: + - 5.15 +qt: + - 5.15 +qt_main: + - 5.15 +qt6_main: + - '6.6' +qtkeychain: + - '0.14' +rdma_core: + - '49' +re2: + - 2023.06.02 +readline: + - "8" +rocksdb: + - '8.0' +root_base: + - 6.28.10 +ruby: + - 2.5 + - 2.6 +r_base: + - 4.1 # [win] + - 4.2 # [not win] + - 4.3 # [not win] +libscotch: + - 7.0.4 +libptscotch: + - 7.0.4 +scotch: + - 7.0.4 +ptscotch: + - 7.0.4 +s2n: + - 1.4.2 +sdl2: + - '2' +sdl2_image: + - '2' +sdl2_mixer: + - '2' +sdl2_net: + - '2' +sdl2_ttf: + - '2' +singular: + - 4.3.2.p8 +snappy: + - 1 +soapysdr: + - '0.8' +sox: + - 14.4.2 +spdlog: + - '1.11' +# keep sqlite in sync with libsqlite +sqlite: + - 3 +srm_ifce: + - 1.24.6 +starlink_ast: + - '9.2.7' +suitesparse: + - 5 +superlu_dist: + - '8' +tbb: + - '2021' +tbb_devel: + - '2021' +tensorflow: + - "2.15" +thrift_cpp: + - 0.18.1 +tinyxml2: + - '10' +tk: + - 8.6 # [not ppc64le] +tiledb: + - '2.19' +ucx: + - '1.15.0' +uhd: + - 4.6.0 +urdfdom: + - 3.1 +vc: # [win] + - 14 # [win] +vlfeat: + - 0.9.21 +volk: + - '3.1' +vtk: + - 9.2.6 +wcslib: + - '8' +wxwidgets: + - '3.2' +x264: + - '1!164.*' +x265: + - '3.5' +xerces_c: + - 3.2 +xrootd: + - '5' +xz: + - 5 +zeromq: + - '4.3.5' +zfp: + - 1.0 +zlib: + - 1.2 +zlib_ng: + - 2.0 +zstd: + - '1.5' diff --git a/tools/deploy.sh b/tools/deploy.sh new file mode 100755 index 0000000..5bdcf86 --- /dev/null +++ b/tools/deploy.sh @@ -0,0 +1,4 @@ +source activate +mamba install anaconda-client +if [[ "$CI_COMMIT_REF_NAME" == "master" ]]; then DEPLOY_LABEL=main; else DEPLOY_LABEL=nightly; fi +anaconda -t ${ANACONDA_API_TOKEN} upload -l ${DEPLOY_LABEL} -u ${CONDA_USER} --skip bld-dir/**/*.tar.bz2 diff --git a/tools/doxy2swig.py b/tools/doxy2swig.py new file mode 100644 index 0000000..ca513a6 --- /dev/null +++ b/tools/doxy2swig.py @@ -0,0 +1,838 @@ +#!/usr/bin/env python +"""doxy2swig.py [options] index.xml output.i + +Doxygen XML to SWIG docstring converter (improved version). + +Converts Doxygen generated XML files into a file containing docstrings +for use by SWIG. + +index.xml is your doxygen generated XML file and output.i is where the +output will be written (the file will be clobbered). 
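+
+Example (an assumed workflow, not prescribed by this script): enable
+GENERATE_XML = YES in the Doxyfile, then run
+
+    doxygen Doxyfile
+    python doxy2swig.py -c -a -o -f xml/index.xml docstrings.i
+
+and %include "docstrings.i" near the top of the SWIG interface file, before
+the declarations it documents.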
+""" +# +# The current version of this code is hosted on a github repository: +# https://github.com/m7thon/doxy2swig +# +# This code is implemented using Mark Pilgrim's code as a guideline: +# http://www.faqs.org/docs/diveintopython/kgp_divein.html +# +# Original Author: Prabhu Ramachandran +# Modified by: Michael Thon (June 2015) +# License: BSD style +# +# Thanks: +# Johan Hake: the include_function_definition feature +# Bill Spotz: bug reports and testing. +# Sebastian Henschel: Misc. enhancements. +# +# Changes: +# June 2015 (Michael Thon): +# - class documentation: +# -c: add constructor call signatures and a "Constructors" section +# collecting the respective docs (e.g. for python) +# -a: add "Attributes" section collecting the documentation for member +# variables (e.g. for python) +# - overloaded functions: +# -o: collect all documentation into one "Overloaded function" section +# - option to include function definition / signature renamed to -f +# - formatting: +# + included function signatures slightly reformatted +# + option (-t) to turn off/on type information for funciton signatures +# + lists (incl. nested and ordered) +# + attempt to produce docstrings that render nicely as markdown +# + translate code, emphasis, bold, linebreak, hruler, blockquote, +# verbatim, heading tags to markdown +# + new text-wrapping and option -w to specify the text width +# + +from xml.dom import minidom +import re +import textwrap +import sys +import os.path +import optparse + + +def my_open_read(source): + if hasattr(source, "read"): + return source + else: + try: + return open(source, encoding='utf-8') + except TypeError: + return open(source) + +def my_open_write(dest): + if hasattr(dest, "write"): + return dest + else: + try: + return open(dest, 'w', encoding='utf-8') + except TypeError: + return open(dest, 'w') + +# MARK: Text handling: +def shift(txt, indent = ' ', prepend = ''): + """Return a list corresponding to the lines of text in the `txt` list + indented by `indent`. Prepend instead the string given in `prepend` to the + beginning of the first line. Note that if len(prepend) > len(indent), then + `prepend` will be truncated (doing better is tricky!). This preserves a + special '' entry at the end of `txt` (see `do_para` for the meaning). + """ + if type(indent) is int: + indent = indent * ' ' + special_end = txt[-1:] == [''] + lines = ''.join(txt).splitlines(True) + for i in range(1,len(lines)): + if lines[i].strip() or indent.strip(): + lines[i] = indent + lines[i] + if not lines: + return prepend + prepend = prepend[:len(indent)] + indent = indent[len(prepend):] + lines[0] = prepend + indent + lines[0] + ret = [''.join(lines)] + if special_end: + ret.append('') + return ret + +class Doxy2SWIG: + """Converts Doxygen generated XML files into a file containing + docstrings that can be used by SWIG-1.3.x that have support for + feature("docstring"). Once the data is parsed it is stored in + self.pieces. + + """ + + def __init__(self, src, + with_function_signature = False, + with_type_info = False, + with_constructor_list = False, + with_attribute_list = False, + with_overloaded_functions = False, + textwidth = 80, + quiet = False): + """Initialize the instance given a source object. `src` can + be a file or filename. If you do not want to include function + glossary.rst from doxygen then set + `include_function_definition` to `False`. This is handy since + this allows you to use the swig generated function definition + using %feature("autodoc", [0,1]). 
+ + """ + # options: + self.with_function_signature = with_function_signature + self.with_type_info = with_type_info + self.with_constructor_list = with_constructor_list + self.with_attribute_list = with_attribute_list + self.with_overloaded_functions = with_overloaded_functions + self.textwidth = textwidth + self.quiet = quiet + + # state: + self.indent = 0 + self.listitem = '' + self.pieces = [] + + f = my_open_read(src) + self.my_dir = os.path.dirname(f.name) + self.xmldoc = minidom.parse(f).documentElement + f.close() + + self.pieces.append('\n// File: %s\n' % + os.path.basename(f.name)) + + self.space_re = re.compile(r'\s+') + self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)') + self.multi = 0 + self.ignores = ['inheritancegraph', 'param', 'listofallmembers', + 'innerclass', 'name', 'declname', 'incdepgraph', + 'invincdepgraph', 'programlisting', 'type', + 'references.rst', 'referencedby', 'location', + 'collaborationgraph', 'reimplements', + 'reimplementedby', 'derivedcompoundref', + 'basecompoundref', + 'argsstring', 'definition', 'exceptions'] + #self.generics = [] + + def generate(self): + """Parses the file set in the initialization. The resulting + data is stored in `self.pieces`. + + """ + self.parse(self.xmldoc) + + def write(self, fname): + o = my_open_write(fname) + o.write(''.join(self.pieces)) + o.write('\n') + o.close() + + def parse(self, node): + """Parse a given node. This function in turn calls the + `parse_` functions which handle the respective + nodes. + + """ + pm = getattr(self, "parse_%s" % node.__class__.__name__) + pm(node) + + def parse_Document(self, node): + self.parse(node.documentElement) + + def parse_Text(self, node): + txt = node.data + if txt == ' ': + # this can happen when two tags follow in a text, e.g., + # " ... $..." etc. + # here we want to keep the space. + self.add_text(txt) + return + txt = txt.replace('\\', r'\\') + txt = txt.replace('"', r'\"') + # ignore pure whitespace + m = self.space_re.match(txt) + if not (m and len(m.group()) == len(txt)): + self.add_text(txt) + + def parse_Comment(self, node): + """Parse a `COMMENT_NODE`. This does nothing for now.""" + return + + def parse_Element(self, node): + """Parse an `ELEMENT_NODE`. This calls specific + `do_` handers for different elements. If no handler + is available the `subnode_parse` method is called. All + tagNames specified in `self.ignores` are simply ignored. + + """ + name = node.tagName + ignores = self.ignores + if name in ignores: + return + attr = "do_%s" % name + if hasattr(self, attr): + handlerMethod = getattr(self, attr) + handlerMethod(node) + else: + self.subnode_parse(node) + #if name not in self.generics: self.generics.append(name) + +# MARK: Special format parsing + def subnode_parse(self, node, pieces=None, indent=0, ignore=[], restrict=None): + """Parse the subnodes of a given node. Subnodes with tags in the + `ignore` list are ignored. If pieces is given, use this as target for + the parse results instead of self.pieces. Indent all lines by the amount + given in `indent`. Note that the initial content in `pieces` is not + indented. 
The final result is in any case added to self.pieces.""" + if pieces is not None: + old_pieces, self.pieces = self.pieces, pieces + else: + old_pieces = [] + if type(indent) is int: + indent = indent * ' ' + if len(indent) > 0: + pieces = ''.join(self.pieces) + i_piece = pieces[:len(indent)] + if self.pieces[-1:] == ['']: + self.pieces = [pieces[len(indent):]] + [''] + elif self.pieces != []: + self.pieces = [pieces[len(indent):]] + self.indent += len(indent) + for n in node.childNodes: + if restrict is not None: + if n.nodeType == n.ELEMENT_NODE and n.tagName in restrict: + self.parse(n) + elif n.nodeType != n.ELEMENT_NODE or n.tagName not in ignore: + self.parse(n) + if len(indent) > 0: + self.pieces = shift(self.pieces, indent, i_piece) + self.indent -= len(indent) + old_pieces.extend(self.pieces) + self.pieces = old_pieces + + def surround_parse(self, node, pre_char, post_char): + """Parse the subnodes of a given node. Subnodes with tags in the + `ignore` list are ignored. Prepend `pre_char` and append `post_char` to + the output in self.pieces.""" + self.add_text(pre_char) + self.subnode_parse(node) + self.add_text(post_char) + +# MARK: Helper functions + def get_specific_subnodes(self, node, name, recursive=0): + """Given a node and a name, return a list of child `ELEMENT_NODEs`, that + have a `tagName` matching the `name`. Search recursively for `recursive` + levels. + """ + children = [x for x in node.childNodes if x.nodeType == x.ELEMENT_NODE] + ret = [x for x in children if x.tagName == name] + if recursive > 0: + for x in children: + ret.extend(self.get_specific_subnodes(x, name, recursive-1)) + return ret + + def get_specific_nodes(self, node, names): + """Given a node and a sequence of strings in `names`, return a + dictionary containing the names as keys and child + `ELEMENT_NODEs`, that have a `tagName` equal to the name. + + """ + nodes = [(x.tagName, x) for x in node.childNodes + if x.nodeType == x.ELEMENT_NODE and + x.tagName in names] + return dict(nodes) + + def add_text(self, value): + """Adds text corresponding to `value` into `self.pieces`.""" + if isinstance(value, (list, tuple)): + self.pieces.extend(value) + else: + self.pieces.append(value) + + def start_new_paragraph(self): + """Make sure to create an empty line. This is overridden, if the previous + text ends with the special marker ''. In that case, nothing is done. + """ + if self.pieces[-1:] == ['']: # respect special marker + return + elif self.pieces == []: # first paragraph, add '\n', override with '' + self.pieces = ['\n'] + elif self.pieces[-1][-1:] != '\n': # previous line not ended + self.pieces.extend([' \n' ,'\n']) + else: #default + self.pieces.append('\n') + + def add_line_with_subsequent_indent(self, line, indent=4): + """Add line of text and wrap such that subsequent lines are indented + by `indent` spaces. + """ + if isinstance(line, (list, tuple)): + line = ''.join(line) + line = line.strip() + width = self.textwidth-self.indent-indent + wrapped_lines = textwrap.wrap(line[indent:], width=width) + for i in range(len(wrapped_lines)): + if wrapped_lines[i] != '': + wrapped_lines[i] = indent * ' ' + wrapped_lines[i] + self.pieces.append(line[:indent] + '\n'.join(wrapped_lines)[indent:] + ' \n') + + def extract_text(self, node): + """Return the string representation of the node or list of nodes by parsing the + subnodes, but returning the result as a string instead of adding it to `self.pieces`. + Note that this allows extracting text even if the node is in the ignore list. 
+ """ + if not isinstance(node, (list, tuple)): + node = [node] + pieces, self.pieces = self.pieces, [''] + for n in node: + for sn in n.childNodes: + self.parse(sn) + ret = ''.join(self.pieces) + self.pieces = pieces + return ret + + def get_function_signature(self, node): + """Returns the function signature string for memberdef nodes.""" + name = self.extract_text(self.get_specific_subnodes(node, 'name')) + if self.with_type_info: + argsstring = self.extract_text(self.get_specific_subnodes(node, 'argsstring')) + else: + argsstring = [] + param_id = 1 + for n_param in self.get_specific_subnodes(node, 'param'): + declname = self.extract_text(self.get_specific_subnodes(n_param, 'declname')) + if not declname: + declname = 'arg' + str(param_id) + defval = self.extract_text(self.get_specific_subnodes(n_param, 'defval')) + if defval: + defval = '=' + defval + argsstring.append(declname + defval) + param_id = param_id + 1 + argsstring = '(' + ', '.join(argsstring) + ')' + type = self.extract_text(self.get_specific_subnodes(node, 'type')) + function_definition = name + argsstring + if type != '' and type != 'void': + function_definition = function_definition + ' -> ' + type + return '`' + function_definition + '` ' + +# MARK: Special parsing tasks (need to be called manually) + def make_constructor_list(self, constructor_nodes, classname): + """Produces the "Constructors" section and the constructor signatures + (since swig does not do so for classes) for class docstrings.""" + if constructor_nodes == []: + return + self.add_text(['\n', 'Constructors', + '\n', '------------']) + for n in constructor_nodes: + self.add_text('\n') + self.add_line_with_subsequent_indent('* ' + self.get_function_signature(n)) + self.subnode_parse(n, pieces = [], indent=4, ignore=['definition', 'name']) + + def make_attribute_list(self, node): + """Produces the "Attributes" section in class docstrings for public + member variables (attributes). + """ + atr_nodes = [] + for n in self.get_specific_subnodes(node, 'memberdef', recursive=2): + if n.attributes['kind'].value == 'variable' and n.attributes['prot'].value == 'public': + atr_nodes.append(n) + if not atr_nodes: + return + self.add_text(['\n', 'Attributes', + '\n', '----------']) + for n in atr_nodes: + name = self.extract_text(self.get_specific_subnodes(n, 'name')) + self.add_text(['\n* ', '`', name, '`', ' : ']) + self.add_text(['`', self.extract_text(self.get_specific_subnodes(n, 'type')), '`']) + self.add_text(' \n') + restrict = ['briefdescription', 'detaileddescription'] + self.subnode_parse(n, pieces=[''], indent=4, restrict=restrict) + + def get_memberdef_nodes_and_signatures(self, node, kind): + """Collects the memberdef nodes and corresponding signatures that + correspond to public function entries that are at most depth 2 deeper + than the current (compounddef) node. Returns a dictionary with + function signatures (what swig expects after the %feature directive) + as keys, and a list of corresponding memberdef nodes as values.""" + sig_dict = {} + sig_prefix = '' + if kind in ('file', 'namespace'): + ns_node = node.getElementsByTagName('innernamespace') + if not ns_node and kind == 'namespace': + ns_node = node.getElementsByTagName('compoundname') + if ns_node: + sig_prefix = self.extract_text(ns_node[0]) + '::' + elif kind in ('class', 'struct'): + # Get the full function name. 
+ cn_node = node.getElementsByTagName('compoundname') + sig_prefix = self.extract_text(cn_node[0]) + '::' + + md_nodes = self.get_specific_subnodes(node, 'memberdef', recursive=2) + for n in md_nodes: + if n.attributes['prot'].value != 'public': + continue + if n.attributes['kind'].value in ['variable', 'typedef']: + continue + if not self.get_specific_subnodes(n, 'definition'): + continue + name = self.extract_text(self.get_specific_subnodes(n, 'name')) + if name[:8] == 'operator': + continue + sig = sig_prefix + name + if sig in sig_dict: + sig_dict[sig].append(n) + else: + sig_dict[sig] = [n] + return sig_dict + + def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes): + """Produce standard documentation for memberdef_nodes.""" + for n in memberdef_nodes: + self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) + if self.with_function_signature: + self.add_line_with_subsequent_indent(self.get_function_signature(n)) + self.subnode_parse(n, pieces=[], ignore=['definition', 'name']) + self.add_text(['";', '\n']) + + def handle_typical_memberdefs(self, signature, memberdef_nodes): + """Produces docstring entries containing an "Overloaded function" + section with the documentation for each overload, if the function is + overloaded and self.with_overloaded_functions is set. Else, produce + normal documentation. + """ + if len(memberdef_nodes) == 1 or not self.with_overloaded_functions: + self.handle_typical_memberdefs_no_overload(signature, memberdef_nodes) + return + + self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) + if self.with_function_signature: + for n in memberdef_nodes: + self.add_line_with_subsequent_indent(self.get_function_signature(n)) + self.add_text('\n') + self.add_text(['Overloaded function', '\n', + '-------------------']) + for n in memberdef_nodes: + self.add_text('\n') + self.add_line_with_subsequent_indent('* ' + self.get_function_signature(n)) + self.subnode_parse(n, pieces=[], indent=4, ignore=['definition', 'name']) + self.add_text(['";', '\n']) + + +# MARK: Tag handlers + def do_linebreak(self, node): + self.add_text(' ') + + def do_ndash(self, node): + self.add_text('--') + + def do_mdash(self, node): + self.add_text('---') + + def do_emphasis(self, node): + self.surround_parse(node, '*', '*') + + def do_bold(self, node): + self.surround_parse(node, '**', '**') + + def do_computeroutput(self, node): + self.surround_parse(node, '`', '`') + + def do_heading(self, node): + self.start_new_paragraph() + pieces, self.pieces = self.pieces, [''] + level = int(node.attributes['level'].value) + self.subnode_parse(node) + if level == 1: + self.pieces.insert(0, '\n') + self.add_text(['\n', len(''.join(self.pieces).strip()) * '=']) + elif level == 2: + self.add_text(['\n', len(''.join(self.pieces).strip()) * '-']) + elif level >= 3: + self.pieces.insert(0, level * '#' + ' ') + # make following text have no gap to the heading: + pieces.extend([''.join(self.pieces) + ' \n', '']) + self.pieces = pieces + + def do_verbatim(self, node): + self.start_new_paragraph() + self.subnode_parse(node, pieces=[''], indent=4) + + def do_blockquote(self, node): + self.start_new_paragraph() + self.subnode_parse(node, pieces=[''], indent='> ') + + def do_hruler(self, node): + self.start_new_paragraph() + self.add_text('* * * * * \n') + + def do_includes(self, node): + self.add_text('\nC++ includes: ') + self.subnode_parse(node) + self.add_text('\n') + +# MARK: Para tag handler + def do_para(self, node): + """This is the only place where 
text wrapping is automatically performed. + Generally, this function parses the node (locally), wraps the text, and + then adds the result to self.pieces. However, it may be convenient to + allow the previous content of self.pieces to be included in the text + wrapping. For this, use the following *convention*: + If self.pieces ends with '', treat the _previous_ entry as part of the + current paragraph. Else, insert new-line and start a new paragraph + and "wrapping context". + Paragraphs always end with ' \n', but if the parsed content ends with + the special symbol '', this is passed on. + """ + if self.pieces[-1:] == ['']: + pieces, self.pieces = self.pieces[:-2], self.pieces[-2:-1] + else: + self.add_text('\n') + pieces, self.pieces = self.pieces, [''] + self.subnode_parse(node) + dont_end_paragraph = self.pieces[-1:] == [''] + # Now do the text wrapping: + width = self.textwidth - self.indent + wrapped_para = [] + for line in ''.join(self.pieces).splitlines(): + keep_markdown_newline = line[-2:] == ' ' + w_line = textwrap.wrap(line, width=width, break_long_words=False) + if w_line == []: + w_line = [''] + if keep_markdown_newline: + w_line[-1] = w_line[-1] + ' ' + for wl in w_line: + wrapped_para.append(wl + '\n') + if wrapped_para: + if wrapped_para[-1][-3:] != ' \n': + wrapped_para[-1] = wrapped_para[-1][:-1] + ' \n' + if dont_end_paragraph: + wrapped_para.append('') + pieces.extend(wrapped_para) + self.pieces = pieces + +# MARK: List tag handlers + def do_itemizedlist(self, node): + if self.listitem == '': + self.start_new_paragraph() + elif self.pieces != [] and self.pieces[-1:] != ['']: + self.add_text('\n') + listitem = self.listitem + if self.listitem in ['*', '-']: + self.listitem = '-' + else: + self.listitem = '*' + self.subnode_parse(node) + self.listitem = listitem + + def do_orderedlist(self, node): + if self.listitem == '': + self.start_new_paragraph() + elif self.pieces != [] and self.pieces[-1:] != ['']: + self.add_text('\n') + listitem = self.listitem + self.listitem = 0 + self.subnode_parse(node) + self.listitem = listitem + + def do_listitem(self, node): + try: + self.listitem = int(self.listitem) + 1 + item = str(self.listitem) + '. 
' + except: + item = str(self.listitem) + ' ' + self.subnode_parse(node, item, indent=4) + +# MARK: Parameter list tag handlers + def do_parameterlist(self, node): + self.start_new_paragraph() + text = 'unknown' + for key, val in node.attributes.items(): + if key == 'kind': + if val == 'param': + text = 'Parameters' + elif val == 'exception': + text = 'Exceptions' + elif val == 'retval': + text = 'Returns' + else: + text = val + break + if self.indent == 0: + self.add_text([text, '\n', len(text) * '-', '\n']) + else: + self.add_text([text, ': \n']) + self.subnode_parse(node) + + def do_parameteritem(self, node): + self.subnode_parse(node, pieces=['* ', '']) + + def do_parameternamelist(self, node): + self.subnode_parse(node) + self.add_text([' :', ' \n']) + + def do_parametername(self, node): + if self.pieces != [] and self.pieces != ['* ', '']: + self.add_text(', ') + data = self.extract_text(node) + self.add_text(['`', data, '`']) + + def do_parameterdescription(self, node): + self.subnode_parse(node, pieces=[''], indent=4) + +# MARK: Section tag handler + def do_simplesect(self, node): + kind = node.attributes['kind'].value + if kind in ('date', 'rcs', 'version'): + return + self.start_new_paragraph() + if kind == 'warning': + self.subnode_parse(node, pieces=['**Warning**: ',''], indent=4) + elif kind == 'see': + self.subnode_parse(node, pieces=['See also: ',''], indent=4) + elif kind == 'return': + if self.indent == 0: + pieces = ['Returns', '\n', len('Returns') * '-', '\n', ''] + else: + pieces = ['Returns:', '\n', ''] + self.subnode_parse(node, pieces=pieces) + else: + self.subnode_parse(node, pieces=[kind + ': ',''], indent=4) + +# MARK: %feature("docstring") producing tag handlers + def do_compounddef(self, node): + """This produces %feature("docstring") entries for classes, and handles + class, namespace and file memberdef entries specially to allow for + overloaded functions. For other cases, passes parsing on to standard + handlers (which may produce unexpected results). 
+ """ + kind = node.attributes['kind'].value + if kind in ('class', 'struct'): + prot = node.attributes['prot'].value + if prot != 'public': + return + self.add_text('\n\n') + classdefn = self.extract_text(self.get_specific_subnodes(node, 'compoundname')) + classname = classdefn.split('::')[-1] + self.add_text('%%feature("docstring") %s "\n' % classdefn) + + if self.with_constructor_list: + constructor_nodes = [] + for n in self.get_specific_subnodes(node, 'memberdef', recursive=2): + if n.attributes['prot'].value == 'public': + if self.extract_text(self.get_specific_subnodes(n, 'definition')) == classdefn + '::' + classname: + constructor_nodes.append(n) + for n in constructor_nodes: + self.add_line_with_subsequent_indent(self.get_function_signature(n)) + + names = ('briefdescription','detaileddescription') + sub_dict = self.get_specific_nodes(node, names) + for n in ('briefdescription','detaileddescription'): + if n in sub_dict: + self.parse(sub_dict[n]) + if self.with_constructor_list: + self.make_constructor_list(constructor_nodes, classname) + if self.with_attribute_list: + self.make_attribute_list(node) + + sub_list = self.get_specific_subnodes(node, 'includes') + if sub_list: + self.parse(sub_list[0]) + self.add_text(['";', '\n']) + + names = ['compoundname', 'briefdescription','detaileddescription', 'includes'] + self.subnode_parse(node, ignore = names) + + elif kind in ('file', 'namespace'): + nodes = node.getElementsByTagName('sectiondef') + for n in nodes: + self.parse(n) + + # now explicitely handle possibly overloaded member functions. + if kind in ['class', 'struct','file', 'namespace']: + md_nodes = self.get_memberdef_nodes_and_signatures(node, kind) + for sig in md_nodes: + self.handle_typical_memberdefs(sig, md_nodes[sig]) + + def do_memberdef(self, node): + """Handle cases outside of class, struct, file or namespace. These are + now dealt with by `handle_overloaded_memberfunction`. + Do these even exist??? + """ + prot = node.attributes['prot'].value + id = node.attributes['id'].value + kind = node.attributes['kind'].value + tmp = node.parentNode.parentNode.parentNode + compdef = tmp.getElementsByTagName('compounddef')[0] + cdef_kind = compdef.attributes['kind'].value + if cdef_kind in ('file', 'namespace', 'class', 'struct'): + # These cases are now handled by `handle_typical_memberdefs` + return + if prot != 'public': + return + first = self.get_specific_nodes(node, ('definition', 'name')) + name = self.extract_text(first['name']) + if name[:8] == 'operator': # Don't handle operators yet. + return + if not 'definition' in first or kind in ['variable', 'typedef']: + return + + data = self.extract_text(first['definition']) + self.add_text('\n') + self.add_text(['/* where did this entry come from??? 
*/', '\n']) + self.add_text('%feature("docstring") %s "\n%s' % (data, data)) + + for n in node.childNodes: + if n not in first.values(): + self.parse(n) + self.add_text(['";', '\n']) + +# MARK: Entry tag handlers (dont print anything meaningful) + def do_sectiondef(self, node): + kind = node.attributes['kind'].value + if kind in ('public-func', 'func', 'user-defined', ''): + self.subnode_parse(node) + + def do_header(self, node): + """For a user defined section def a header field is present + which should not be printed as such, so we comment it in the + output.""" + data = self.extract_text(node) + self.add_text('\n/*\n %s \n*/\n' % data) + # If our immediate sibling is a 'description' node then we + # should comment that out also and remove it from the parent + # node's children. + parent = node.parentNode + idx = parent.childNodes.index(node) + if len(parent.childNodes) >= idx + 2: + nd = parent.childNodes[idx + 2] + if nd.nodeName == 'description': + nd = parent.removeChild(nd) + self.add_text('\n/*') + self.subnode_parse(nd) + self.add_text('\n*/\n') + + def do_member(self, node): + kind = node.attributes['kind'].value + refid = node.attributes['refid'].value + if kind == 'function' and refid[:9] == 'namespace': + self.subnode_parse(node) + + def do_doxygenindex(self, node): + self.multi = 1 + comps = node.getElementsByTagName('compound') + for c in comps: + refid = c.attributes['refid'].value + fname = refid + '.xml' + if not os.path.exists(fname): + fname = os.path.join(self.my_dir, fname) + if not self.quiet: + print("parsing file: %s" % fname) + p = Doxy2SWIG(fname, + with_function_signature = self.with_function_signature, + with_type_info = self.with_type_info, + with_constructor_list = self.with_constructor_list, + with_attribute_list = self.with_attribute_list, + with_overloaded_functions = self.with_overloaded_functions, + textwidth = self.textwidth, + quiet = self.quiet) + p.generate() + self.pieces.extend(p.pieces) + +# MARK: main +def main(): + usage = __doc__ + parser = optparse.OptionParser(usage) + parser.add_option("-f", '--function-signature', + action='store_true', + default=False, + dest='f', + help='include function signature in the documentation. This is handy when not using swig auto-generated function glossary.rst %feature("autodoc", [0,1])') + parser.add_option("-t", '--type-info', + action='store_true', + default=False, + dest='t', + help='include type information for arguments in function signatures. This is similar to swig autodoc level 1') + parser.add_option("-c", '--constructor-list', + action='store_true', + default=False, + dest='c', + help='generate a constructor list for class documentation. Useful for target languages where the object construction should be documented in the class documentation.') + parser.add_option("-a", '--attribute-list', + action='store_true', + default=False, + dest='a', + help='generate an attributes list for class documentation. Useful for target languages where class attributes should be documented in the class documentation.') + parser.add_option("-o", '--overloaded-functions', + action='store_true', + default=False, + dest='o', + help='collect all documentation for overloaded functions. Useful for target languages that have no concept of overloaded functions, but also to avoid having to attach the correct docstring to each function overload manually') + parser.add_option("-w", '--width', type="int", + action='store', + dest='w', + default=80, + help='textwidth for wrapping (default: 80). 
Note that the generated lines may include 2 additional spaces (for markdown).') + parser.add_option("-q", '--quiet', + action='store_true', + default=False, + dest='q', + help='be quiet and minimize output') + + options, args = parser.parse_args() + if len(args) != 2: + parser.error("no input and output specified") + + p = Doxy2SWIG(args[0], + with_function_signature = options.f, + with_type_info = options.t, + with_constructor_list = options.c, + with_attribute_list = options.a, + with_overloaded_functions = options.o, + textwidth = options.w, + quiet = options.q) + p.generate() + p.write(args[1]) + +if __name__ == '__main__': + main() diff --git a/tools/install_macos_sdk.sh b/tools/install_macos_sdk.sh new file mode 100755 index 0000000..341634c --- /dev/null +++ b/tools/install_macos_sdk.sh @@ -0,0 +1,33 @@ +# See: https://github.com/openmm/openmm/blob/master/devtools/ci/gh-actions/scripts/install_macos_sdk.sh + +# Install an older MacOS SDK +# This should guarantee OpenMM builds with extended compatibility across MacOS versions +# Adapted from conda-forge-ci-setup scripts: +# * https://github.com/conda-forge/conda-forge-ci-setup-feedstock/blob/dde296e/recipe/run_conda_forge_build_setup_osx +# * https://github.com/conda-forge/conda-forge-ci-setup-feedstock/blob/dde296e/recipe/download_osx_sdk.sh +# +# Some possible updates might involve upgrading the download link to future MacOS releases (10.15 to something else), +# depending on the version provided by the CI + +OSX_SDK_DIR="$(xcode-select -p)/Platforms/MacOSX.platform/Developer/SDKs" +export MACOSX_DEPLOYMENT_TARGET=10.9 +export MACOSX_SDK_VERSION=10.9 + +export CMAKE_OSX_SYSROOT="${OSX_SDK_DIR}/MacOSX${MACOSX_SDK_VERSION}.sdk" + +if [[ ! -d ${CMAKE_OSX_SYSROOT}} ]]; then + echo "Downloading ${MACOSX_SDK_VERSION} sdk" + curl -L -O --connect-timeout 5 --max-time 10 --retry 5 --retry-delay 0 --retry-max-time 40 --retry-connrefused --retry-all-errors \ + https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX${MACOSX_SDK_VERSION}.sdk.tar.xz + tar -xf MacOSX${MACOSX_SDK_VERSION}.sdk.tar.xz -C "$(dirname ${CMAKE_OSX_SYSROOT})" +fi + +if [[ "$MACOSX_DEPLOYMENT_TARGET" == 10.* ]]; then +# set minimum sdk version to our target +plutil -replace MinimumSDKVersion -string ${MACOSX_SDK_VERSION} $(xcode-select -p)/Platforms/MacOSX.platform/Info.plist +plutil -replace DTSDKName -string macosx${MACOSX_SDK_VERSION}internal $(xcode-select -p)/Platforms/MacOSX.platform/Info.plist +fi + +echo "MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET}" >> ${GITHUB_ENV} +echo "CMAKE_OSX_SYSROOT=${MACOSX_DEPLOYMENT_TARGET}" >> ${GITHUB_ENV} +echo "CMAKE_OSX_SYSROOT=${CMAKE_OSX_SYSROOT}" >> ${GITHUB_ENV} \ No newline at end of file diff --git a/tools/setup.bat b/tools/setup.bat new file mode 100644 index 0000000..e2822cb --- /dev/null +++ b/tools/setup.bat @@ -0,0 +1,3 @@ +copy /Y tools\.condarc %userprofile% +copy /Y tools\conda_build_config.yaml conda_build_config.yaml +mamba install -y boa doxygen cmake diff --git a/tools/setup.sh b/tools/setup.sh new file mode 100755 index 0000000..87ce6d1 --- /dev/null +++ b/tools/setup.sh @@ -0,0 +1,3 @@ +cp tools/.condarc ~ +cp tools/conda_build_config.yaml ~ +mamba install boa doxygen cmake
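
For context, a sketch of the git commands that typically produce a change like
this one (the checkout path is a placeholder; the exact commands used are not
recorded in the patch):

    git submodule deinit -f tools
    git rm -f tools                        # drop the gitlink (and its .gitmodules entry)
    git rm -f .gitmodules                  # only submodule left, so remove the file entirely
    rm -rf .git/modules/tools
    cp -a /path/to/tools-checkout/. tools/ # vendor the files back in as plain tracked content
    git add tools
    git commit -m "remove tools submodule"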