diff --git a/.gitignore b/.gitignore index 4fadd5d45..a21d898f3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,13 @@ +build + Makefile.local +tools/astprint tools/demangle tools/mcjitcache tools/mcjitcache_release tools/publicize tools/publicize_release + pyston pyston_opt pyston_noasserts @@ -15,9 +19,17 @@ pyston_debug pyston_prof pyston_profile pyston_release +pyston_grwl +pyston_grwl_dbg +pyston_nosync +pyston_gcc +pyston_release_gcc +pyston_release_gcc_pgo +pyston_release_gcc_pgo_instrumented +pystontmp*/ +/*_unittest + *.cache -tests/t.py -tests/t2.py *.bc stdlib.ll *.o @@ -28,6 +40,7 @@ stdlib*.ll oprofile_data pprof.jit tags +TAGS *.pyc perf.data @@ -38,4 +51,16 @@ gmon.out find_problem.status *.expected_cache +plugins/clang_capi + *.so +*.pch + +compile.log + +*.swp +*.swo + +*.out + +*~ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..aaf2f7714 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,46 @@ +[submodule "test/lib/django"] + path = test/lib/django + url = https://github.com/django/django +[submodule "test/lib/virtualenv"] + path = test/lib/virtualenv + url = https://github.com/dropbox/virtualenv +[submodule "test/lib/pycrypto"] + path = test/lib/pycrypto + url = https://github.com/dlitz/pycrypto.git +[submodule "test/lib/gflags"] + path = test/lib/gflags + url = https://github.com/google/python-gflags + ignore = untracked +[submodule "test/lib/sqlalchemy"] + path = test/lib/sqlalchemy + url = https://github.com/zzzeek/sqlalchemy +[submodule "test/lib/pyxl"] + path = test/lib/pyxl + url = https://github.com/dropbox/pyxl.git + ignore = untracked +[submodule "build_deps/libunwind"] + path = build_deps/libunwind + url = https://github.com/pathscale/libunwind.git + ignore = all +[submodule "build_deps/libpypa"] + path = build_deps/libpypa + url = git://github.com/vinzenz/libpypa.git +[submodule "build_deps/lz4"] + path = build_deps/lz4 + url = git://github.com/Cyan4973/lz4.git +[submodule "build_deps/jemalloc"] + path = 
build_deps/jemalloc + url = git://github.com/jemalloc/jemalloc.git +[submodule "test/lib/numpy"] + path = test/lib/numpy + url = https://github.com/numpy/numpy +[submodule "test/lib/decorator"] + path = test/lib/decorator + url = https://github.com/micheles/decorator +[submodule "test/lib/pyinotify"] + path = test/lib/pyinotify + url = https://github.com/seb-m/pyinotify.git +[submodule "test/lib/sqlalchemy_0.5"] + path = test/lib/sqlalchemy_0.5 + url = https://github.com/zzzeek/sqlalchemy + ignore = untracked diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..5c8cc935e --- /dev/null +++ b/.travis.yml @@ -0,0 +1,91 @@ +language: cpp + +compiler: + - clang + - gcc + +env: + matrix: + - TRAVIS_BUILD_TYPE=Debug CCACHE_DIR=$HOME/.ccache_debug + - TRAVIS_BUILD_TYPE=Release CCACHE_DIR=$HOME/.ccache_release + global: + - PYSTON_RUN_ARGS=G + +matrix: + exclude: + - compiler: gcc + env: TRAVIS_BUILD_TYPE=Debug CCACHE_DIR=$HOME/.ccache_debug + +# use travis-ci docker based infrastructure +sudo: false + +cache: + directories: + - $HOME/.ccache_debug + - $HOME/.ccache_release + +addons: + apt: + sources: + - kubuntu-backports + - llvm-toolchain-precise-3.5 + - ubuntu-toolchain-r-test + packages: + - autoconf + - ccache + - clang-3.5 + - cmake + - g++-4.8 + - gdb + - libbz2-dev + - libgeoip-dev + - libgmp3-dev + - libmpfr-dev + - liblzma-dev + - libncurses5-dev + - libreadline-dev + - libsqlite3-dev + - libtool + - ninja-build + - python-dev + - texlive-extra-utils + - libcurl4-openssl-dev + - libxml2-dev + - libxslt1-dev + - libssl-dev + - swig + +before_install: + - if [ "$CC" = "clang" ]; then export CC="clang-3.5" CXX="clang++-3.5"; fi + - if [ "$CC" = "gcc" ]; then export CC="gcc-4.8" CXX="g++-4.8"; fi + - $CXX --version + +install: + - git clone git://github.com/llvm-mirror/llvm.git ~/pyston_deps/llvm-trunk + - git clone git://github.com/llvm-mirror/clang.git ~/pyston_deps/llvm-trunk/tools/clang + - git config --global user.email "you@example.com" 
+ - git config --global user.name "Your Name" + - git submodule update --init --recursive + - mkdir ~/pyston-build && cd ~/pyston-build + - make -C $TRAVIS_BUILD_DIR llvm_up + - bash $TRAVIS_BUILD_DIR/tools/configure_travis.sh + +before_script: + - mysql -e 'create database mysqldb_test charset utf8;' + +script: + - ccache -z + - ninja -j4 pyston check-deps && PYSTON_RUN_ARGS=G travis_wait 45 ctest --output-on-failure + - ccache -s + - if [ -n "$(git status --porcelain --untracked=no)" ]; then echo "test suite left the source directory dirty"; git status; false; fi + +os: + - linux +# - osx + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/7256425a36658faa8b9b + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always diff --git a/.vimrc.dir b/.vimrc.dir new file mode 100644 index 000000000..2d3dde6fe --- /dev/null +++ b/.vimrc.dir @@ -0,0 +1,8 @@ +set wildignore+=*.expected_cache,*.pyc,*.out,*.bc,*.d,*.o + +let g:pyston_top = expand(':p:h') +command! M execute ":make -C " . g:pyston_top . "/src -j1 COLOR=0 USE_DISTCC=0" +command! L execute ":cfile " . g:pyston_top . "/src/compile.log" + +ca m M +ca l L diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 000000000..cc80f797c --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,411 @@ +cmake_minimum_required(VERSION 2.8) +project(pyston C CXX ASM) + +set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake) + +include(pyconfig) +include(ExternalProject) + +set(DEPS_DIR $ENV{HOME}/pyston_deps) + +if (NOT CMAKE_SIZEOF_VOID_P EQUAL 8) + message(FATAL_ERROR "Pyston does not support 32-bit systems yet") +endif() + +# set build type to release by default +set(CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE) +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Debug Release." 
FORCE) +endif() +if(NOT ${CMAKE_BUILD_TYPE} STREQUAL "Release" AND NOT ${CMAKE_BUILD_TYPE} STREQUAL "Debug") + message(FATAL_ERROR "CMAKE_BUILD_TYPE must be set to Release or Debug") +endif() + +if(UNIX) + find_program(MAKE_PRG NAMES gmake make) + if(MAKE_PRG) + execute_process( + COMMAND "${MAKE_PRG}" --version + OUTPUT_VARIABLE MAKE_VERSION_INFO) + if(NOT "${OUTPUT_VARIABLE}" MATCHES ".*GNU.*") + unset(MAKE_PRG) + endif() + endif() + if(NOT MAKE_PRG) + message(FATAL_ERROR "GNU Make is required to build the dependencies.") + else() + message(STATUS "Found GNU Make at ${MAKE_PRG}") + endif() +endif() + +option(ENABLE_CCACHE "enable caching compiler output" ON) +option(ENABLE_EXTRA_TESTS "pyston extra tests" OFF) +option(ENABLE_GIL "threading use GIL" ON) +option(ENABLE_GOLD "enable the gold linker" ON) +option(ENABLE_GPERFTOOLS "enable the google performance tools" OFF) +option(ENABLE_GRWL "threading use GRWL" OFF) +option(ENABLE_INTEL_JIT_EVENTS "LLVM support for Intel JIT Events API" OFF) +option(ENABLE_LLVM_DEBUG "LLVM debug symbols" OFF) +option(ENABLE_OPROFILE "enable oprofile support" OFF) +option(ENABLE_SELF_HOST "use pyston to test pyston" OFF) +option(ENABLE_VALGRIND "pyston valgrind support" OFF) + +option(ENABLE_PGO "enable -fprofile-generate/-fprofile-use" OFF) +option(ENABLE_LTO "enable -flto" OFF) + +# automatically use ccache if found +if(ENABLE_CCACHE AND NOT ENABLE_PGO) + find_program(CCACHE ccache) + if(CCACHE) + message(STATUS "found ccache ${CCACHE}") + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "CCACHE_CPP2=yes ${CCACHE}") + set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ${CCACHE}) + endif() +endif() + +# automatically use the gold linker if found +if(ENABLE_GOLD) + find_program(GOLD_LINKER ld.gold) + if(GOLD_LINKER) + message(STATUS "found the gold linker ${GOLD_LINKER}") + set(CMAKE_LINKER "${GOLD_LINKER}") + set(CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS} -B${CMAKE_SOURCE_DIR}/tools/build_system") + if(NOT ENABLE_PGO) + # let pgo 
determine the ordering + set(CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS} -Wl,--section-ordering-file,${CMAKE_CURRENT_SOURCE_DIR}/section_ordering.txt") + endif() + endif() +endif() + + +# pyston self host mode +if(ENABLE_SELF_HOST) + set(PYTHON_EXE "pyston") +else() + set(PYTHON_EXE "python2") +endif() + +# initial clang flags (set here so they're used when building llvm) +set(CLANG_FLAGS "-Qunused-arguments -fcolor-diagnostics" CACHE STRING "Clang specific C and CXX flags") +if("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CLANG_FLAGS}") +endif() +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CLANG_FLAGS}") +endif() + +if(ENABLE_LTO) + set(LTO_FLAGS "-flto") +endif() + +if(ENABLE_PGO) + if(NOT "${PROFILE_DIR}" STREQUAL "") + set(PROFILE_STATE "${PROFILE_STATE}=${PROFILE_DIR}") + endif() + set(PGO_FLAGS "-fprofile-${PROFILE_STATE} -fprofile-correction") +endif() + +macro(ADD_PROFILE_FLAGS) + set(PROFILE_FLAGS "${PGO_FLAGS} ${LTO_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${PROFILE_FLAGS}") +endmacro() + +# used in src/runtime/inline/CMakeLists.txt to remove the flags we added above +macro(REMOVE_PROFILE_FLAGS) + string(REPLACE "${PROFILE_FLAGS}" " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") + string(REPLACE "${PROFILE_FLAGS}" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + string(REPLACE "${PROFILE_FLAGS}" " " CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") + string(REPLACE "${PROFILE_FLAGS}" " " CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS}") + string(REPLACE "${PROFILE_FLAGS}" " " CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}") +endmacro() + +# llvm disable debug info unless ENABLE_LLVM_DEBUG is ON +if(${CMAKE_BUILD_TYPE} STREQUAL "Debug" AND NOT ENABLE_LLVM_DEBUG) + set(CMAKE_CXX_FLAGS_DEBUG "-O3" CACHE STRING "" FORCE) +endif() + +add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/gitmodules + COMMAND git submodule update --init build_deps + COMMAND cmake -E touch 
${CMAKE_BINARY_DIR}/gitmodules + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + DEPENDS ${CMAKE_SOURCE_DIR}/.gitmodules) +add_custom_target(gitsubmodules DEPENDS ${CMAKE_BINARY_DIR}/gitmodules) + +# jemalloc +ExternalProject_Add(libjemalloc + BUILD_BYPRODUCTS ${CMAKE_BINARY_DIR}/jemalloc/lib/libjemalloc.a + PREFIX jemalloc-build + SOURCE_DIR ${CMAKE_SOURCE_DIR}/build_deps/jemalloc + DEPENDS gitsubmodules + UPDATE_COMMAND autoconf + CONFIGURE_COMMAND ${CMAKE_SOURCE_DIR}/build_deps/jemalloc/configure --prefix=${CMAKE_BINARY_DIR}/jemalloc --enable-autogen --enable-prof-libunwind + BUILD_COMMAND ${MAKE_PRG} + INSTALL_COMMAND ${MAKE_PRG} install_bin install_lib + LOG_UPDATE ON + LOG_CONFIGURE ON + LOG_BUILD ON + LOG_INSTALL ON) + +# +# CMake (<3.3) does not support BUILD_BYPRODUCTS. +# This is a problem for ninja, which will not know how to build the generated file. +# Here are a couple hacks to get around it: +# +# Add a copy step. This just hides the dependency but it seems to work. +# add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/jemalloc/lib/libjemalloc_copied.a DEPENDS libjemalloc COMMAND cp "${CMAKE_BINARY_DIR}/jemalloc/lib/libjemalloc.a" ${CMAKE_BINARY_DIR}/jemalloc/lib/libjemalloc_copied.a) +# add_custom_target(libjemalloc_copied DEPENDS ${CMAKE_BINARY_DIR}/jemalloc/lib/libjemalloc_copied.a) +# +# Hack option #2: the existence of the custom target tells ninja that libjemalloc.a will get built somehow. +# The name of the target doesn't matter. +add_custom_target(libjemalloc_byproducts DEPENDS ${CMAKE_BINARY_DIR}/jemalloc/lib/libjemalloc.a) +# +# Hack option #3: delete the .so's and use `-ljemalloc` on the link line so that ninja doesn't know about the dependency. 
+# ExternalProject_Add_Step(libjemalloc disable_shared +# DEPENDEES install +# COMMAND sh -c "rm -v ${CMAKE_BINARY_DIR}/jemalloc/lib/*.so*" +# ) + +execute_process(COMMAND cat llvm_revision.txt WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_VARIABLE LLVMREV OUTPUT_STRIP_TRAILING_WHITESPACE) + +# llvm, clang, and libunwind patches +add_custom_target(llvm_gotorev python ${CMAKE_SOURCE_DIR}/tools/git_svn_gotorev.py ${DEPS_DIR}/llvm-trunk ${LLVMREV} llvm_patches WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) +add_custom_target(clang_gotorev python ${CMAKE_SOURCE_DIR}/tools/git_svn_gotorev.py ${DEPS_DIR}/llvm-trunk/tools/clang ${LLVMREV} clang_patches WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}) +add_custom_target(llvm_up DEPENDS llvm_gotorev clang_gotorev) + +set(LIBUNWIND_PATCHES + ${CMAKE_SOURCE_DIR}/libunwind_patches/0001-pyston-add-lots-of-comments.patch + ${CMAKE_SOURCE_DIR}/libunwind_patches/0002-pyston-stop-x86_64-setcontext-restoring-uninitialize.patch + ${CMAKE_SOURCE_DIR}/libunwind_patches/0003-use-a-sorted-array-for-registered-objects-and-do-a-b.patch + ${CMAKE_SOURCE_DIR}/libunwind_patches/9999-is-patched-marker.patch +) + +add_custom_command(OUTPUT ${CMAKE_SOURCE_DIR}/build_deps/libunwind/pyston_patched + COMMAND git submodule update build_deps/libunwind + COMMAND python ${CMAKE_SOURCE_DIR}/tools/git_am_automated.py build_deps/libunwind ${LIBUNWIND_PATCHES} + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + DEPENDS ${LIBUNWIND_PATCHES} + DEPENDS gitsubmodules) +add_custom_target(libunwind_patched DEPENDS ${CMAKE_SOURCE_DIR}/build_deps/libunwind/pyston_patched) + +# llvm +set(LLVM_TARGETS_TO_BUILD "host" CACHE STRING "LLVM targets") +#set(LLVM_EXTERNAL_CLANG_SOURCE_DIR "${CMAKE_SOURCE_DIR}/clang" CACHE String "Clang directory") +if(ENABLE_INTEL_JIT_EVENTS) + set(LLVM_USE_INTEL_JITEVENTS "ON" CACHE STRING "Enable building support for the Intel JIT Events API") + set(INTEL_JIT_EVENTS_LIB "inteljitevents") + add_definitions(-DENABLE_INTEL_JIT_EVENTS=1) +endif() 
+add_subdirectory(${DEPS_DIR}/llvm-trunk ${CMAKE_BINARY_DIR}/llvm EXCLUDE_FROM_ALL) +list(APPEND CMAKE_MODULE_PATH "${CMAKE_BINARY_DIR}/llvm/share/llvm/cmake/") +include(LLVMConfig) +llvm_map_components_to_libnames(LLVM_LIBS core mcjit native bitreader bitwriter ipo irreader debuginfodwarf instrumentation ${INTEL_JIT_EVENTS_LIB}) + +# libunwind +if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + set(LIBUNWIND_DEBUG_CFLAGS "CFLAGS=-O0 -g") + set(LIBUNWIND_DEBUG "--enable-debug") + set(LIBUNWIND_DEBUG_FRAME "--enable-debug-frame") + set(LIBUNWIND_CONSERVATIVE_CHECKS "--enable-conservative-checks") +else() + set(LIBUNWIND_CONSERVATIVE_CHECKS "--disable-conservative-checks") +endif() +ExternalProject_Add(libunwind + PREFIX build_deps/libunwind + SOURCE_DIR ${CMAKE_SOURCE_DIR}/build_deps/libunwind + DEPENDS libunwind_patched + UPDATE_COMMAND autoreconf -i + CONFIGURE_COMMAND ${CMAKE_SOURCE_DIR}/build_deps/libunwind/configure ${LIBUNWIND_DEBUG_CFLAGS} --prefix=${CMAKE_BINARY_DIR}/build_deps/libunwind --enable-shared=0 --disable-block-signals ${LIBUNWIND_CONSERVATIVE_CHECKS} ${LIBUNWIND_DEBUG} ${LIBUNWIND_DEBUG_FRAME} + BUILD_COMMAND ${MAKE_PRG} -j${TEST_THREADS} + LOG_UPDATE ON + LOG_CONFIGURE ON + LOG_BUILD ON + LOG_INSTALL ON) +# Tell CMake that patching libunwind means that we need to rebuild it: +ExternalProject_Add_Step(libunwind forcebuild + DEPENDS ${CMAKE_SOURCE_DIR}/build_deps/libunwind/pyston_patched + DEPENDERS build +) +# Tell CMake that rebuilding libunwind will touch the build files (why doesn't it know this??) +# Otherwise, if you do something that triggers a rebuild (not a fresh build) of libunwind, +# it will take another build to realize that any source files that #include libunwind.h +# need to get rebuilt. 
+SET_SOURCE_FILES_PROPERTIES( + ${CMAKE_BINARY_DIR}/build_deps/libunwind/include/libunwind.h PROPERTIES OBJECT_DEPENDS ${CMAKE_SOURCE_DIR}/build_deps/libunwind/pyston_patched +) + +# libpypa +add_subdirectory(build_deps/libpypa EXCLUDE_FROM_ALL) +add_dependencies(pypa gitsubmodules) + +# lz4 +add_subdirectory(build_deps/lz4/cmake_unofficial EXCLUDE_FROM_ALL) +add_dependencies(lz4 gitsubmodules) + +# valgrind +if(ENABLE_VALGRIND) + find_package(Valgrind REQUIRED) + include_directories(${VALGRIND_INCLUDE_DIR}) + message(STATUS "Including valgrind ${VALGRIND_INCLUDE_DIR}") +else() + add_definitions(-DNVALGRIND) +endif() + +if(ENABLE_GRWL) + add_definitions(-DTHREADING_USE_GIL=0 -DTHREADING_USE_GRWL=1) +else() + add_definitions(-DTHREADING_USE_GIL=1 -DTHREADING_USE_GRWL=0) +endif() + +if(ENABLE_GPERFTOOLS) + set(OPTIONAL_LIBRARIES ${OPTIONAL_LIBRARIES} profiler) +endif() + +if(ENABLE_OPROFILE) + set(OPTIONAL_LIBRARIES ${OPTIONAL_LIBRARIES} opagent) +endif() + +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Werror -Wreturn-type -Wno-sign-compare -Wno-unused -Wno-unused-parameter -fno-omit-frame-pointer -g") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_C_FLAGS} -std=c++11 -fno-rtti -fexceptions -fvisibility-inlines-hidden -ffunction-sections -fdata-sections -Woverloaded-virtual -Wno-invalid-offsetof -Wcast-qual -Wno-sign-conversion -Wnon-virtual-dtor -Winit-self -Wmissing-include-dirs -Wstrict-overflow=5 -Wpointer-arith -Wtype-limits -Wwrite-strings -Wempty-body -Waggregate-return -Wmissing-field-initializers -Wredundant-decls -Winline -Wint-to-pointer-cast -Wlong-long -Wvla -Wno-attributes -g") + +set(CLANG_FLAGS "${CLANG_FLAGS} -Wimplicit-int -Wstrict-prototypes -Wold-style-definition -Wnested-externs -Wpointer-to-int-cast -Wno-mismatched-tags -Wno-extern-c-compat") +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CLANG_FLAGS}") +endif() + +if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") + set(CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS} -Wno-long-long -Wno-aggregate-return -Wno-inline -Wno-redundant-decls -Wno-strict-overflow") + set(CMAKE_CXX_LINK_FLAGS "${CMAKE_CXX_LINK_FLAGS} -Wl,--no-as-needed") # without this, we don't seem to get jemalloc on gcc builds +endif() + +add_definitions(${LLVM_DEFINITIONS}) +add_definitions(-DDEFAULT_PYTHON_MAJOR_VERSION=2 -DDEFAULT_PYTHON_MINOR_VERSION=7 -DDEFAULT_PYTHON_MICRO_VERSION=6) # Python 2.7.6 +add_definitions(-DLLVMREV=${LLVMREV}) + +include_directories(${CMAKE_BINARY_DIR}/from_cpython/Include) +include_directories(${LLVM_INCLUDE_DIRS}) + +find_package(LibLZMA REQUIRED) +link_directories(${CMAKE_BINARY_DIR}/build_deps/libunwind/lib) +link_directories(${LLVM_LIBRARY_DIRS}) + +ADD_PROFILE_FLAGS() + +add_subdirectory(lib_pyston) +add_subdirectory(from_cpython) +add_subdirectory(src) +add_subdirectory(test/test_extension) +add_subdirectory(test/unittests) +add_subdirectory(tools) + +# There are supposed to be better ways [1] to add link dependencies, but none of them worked for me. 
+# [1] http://www.cmake.org/pipermail/cmake/2010-May/037206.html +add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/linkdeps_dummy.c COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_BINARY_DIR}/linkdeps_dummy.c DEPENDS ${CMAKE_SOURCE_DIR}/section_ordering.txt) + +add_executable(pyston $ $ $ linkdeps_dummy.c) +# Wrap the stdlib in --whole-archive to force all the symbols to be included and eventually exported + +target_link_libraries(pyston -Wl,--whole-archive stdlib -Wl,--no-whole-archive pthread m z readline sqlite3 gmp mpfr ssl crypto unwind pypa liblz4 double-conversion util ${LLVM_LIBS} ${LIBLZMA_LIBRARIES} ${OPTIONAL_LIBRARIES} ${CMAKE_BINARY_DIR}/jemalloc/lib/libjemalloc.a) +add_dependencies(pyston libjemalloc) + +# copy src/codegen/parse_ast.py to the build directory +add_custom_command(TARGET pyston POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_SOURCE_DIR}/src/codegen/parse_ast.py ${CMAKE_BINARY_DIR}/src/codegen/parse_ast.py) + +add_custom_target(astcompare COMMAND ${CMAKE_SOURCE_DIR}/tools/astprint_test.sh + DEPENDS astprint + COMMENT "Running libpypa vs CPython AST result comparison test") + +# test +enable_testing() +set(TEST_THREADS 1 CACHE STRING "number of pyston test threads") +set(PYTHONIOENCODING utf-8) + +add_test(NAME lint COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/tools/lint.py WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src) +add_test(NAME check-format COMMAND ${CMAKE_SOURCE_DIR}/tools/check_format.sh ${LLVM_TOOLS_BINARY_DIR}/clang-format WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src) +add_test(NAME gc_unittest COMMAND gc_unittest) +add_test(NAME analysis_unittest COMMAND analysis_unittest) + +macro(add_pyston_test testname directory) + add_test(NAME pyston_${testname}_${directory} COMMAND ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/tools/tester.py -R ./pyston -j${TEST_THREADS} -k -a=-S ${ARGV2} ${ARGV3} ${ARGV4} ${CMAKE_SOURCE_DIR}/test/${directory}) +endmacro() + +# tests testname directory arguments +add_pyston_test(defaults tests --order-by-mtime -t50) 
+add_pyston_test(force_llvm tests -a=-n -a=-X -t50) +if(${CMAKE_BUILD_TYPE} STREQUAL "Release") + add_pyston_test(max_compilation_tier tests -a=-O -a=-X -t50) +endif() +add_pyston_test(defaults cpython --exit-code-only --skip-failing -t100) +add_pyston_test(defaults integration --exit-code-only --skip-failing -t600) +if(ENABLE_EXTRA_TESTS) + add_pyston_test(defaults extra -t900 --exit-code-only) +endif() + + +# format +file(GLOB_RECURSE FORMAT_FILES ${CMAKE_SOURCE_DIR}/src/*.h ${CMAKE_SOURCE_DIR}/src/*.cpp) +add_custom_target(format ${CMAKE_SOURCE_DIR}/tools/do_format.sh ${LLVM_TOOLS_BINARY_DIR}/clang-format DEPENDS clang-format WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src) +add_custom_target(check-format ${CMAKE_SOURCE_DIR}/tools/check_format.sh ${LLVM_TOOLS_BINARY_DIR}/clang-format DEPENDS clang-format WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src) + +# lint +add_custom_target(lint ${PYTHON_EXE} ${CMAKE_SOURCE_DIR}/tools/lint.py WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src) + +# check +add_custom_target(check-deps DEPENDS pyston copy_stdlib copy_libpyston clang-format ext_cpython ext_pyston unittests sharedmods) +add_custom_target(check-pyston COMMAND ${CMAKE_CTEST_COMMAND} --output-on-failure DEPENDS check-deps) + +# {run,dbg,perf,memcheck,memleaks,cachegrind}_TESTNAME +file(GLOB RUNTARGETS ${CMAKE_SOURCE_DIR}/test/tests/*.py ${CMAKE_SOURCE_DIR}/microbenchmarks/*.py ${CMAKE_SOURCE_DIR}/minibenchmarks/*.py) +foreach(RUNTARGET ${RUNTARGETS}) + get_filename_component(BASEFILENAME ${RUNTARGET} NAME_WE) + add_custom_target(run_${BASEFILENAME} ./pyston -q ${RUNTARGET} DEPENDS pyston) + add_custom_target(dbg_${BASEFILENAME} gdb --ex "set confirm off" --ex "handle SIGUSR2 pass nostop noprint" --ex run --ex "bt 20" --args ./pyston -q ${RUNTARGET} DEPENDS pyston) + add_custom_target(perf_${BASEFILENAME} perf record -g -- ./pyston -q -p ${RUNTARGET} + COMMAND perf report -v -n -g flat,1000 | bash ${CMAKE_SOURCE_DIR}/tools/cumulate.sh | less -S) + + if(ENABLE_VALGRIND) + 
add_custom_target(memcheck_${BASEFILENAME} valgrind --tool=memcheck --leak-check=no --db-attach=yes ./pyston ${RUNTARGET} DEPENDS pyston) + add_custom_target(memleaks_${BASEFILENAME} valgrind --tool=memcheck --leak-check=full --leak-resolution=low --show-reachable=yes ./pyston ${RUNTARGET} DEPENDS pyston) + add_custom_target(cachegrind_${BASEFILENAME} valgrind --tool=cachegrind ./pyston ${RUNTARGET} DEPENDS pyston) + endif() +endforeach() + +# doxygen +find_package(Doxygen) +if(DOXYGEN_FOUND) + configure_file(${CMAKE_SOURCE_DIR}/docs/Doxyfile.in ${CMAKE_BINARY_DIR}/Doxyfile @ONLY) + add_custom_target(docs ${DOXYGEN_EXECUTABLE} ${CMAKE_BINARY_DIR}/Doxyfile WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +else() + add_custom_target(docs COMMAND ${CMAKE_COMMAND} -E echo "Can't create docs, doxygen not installed \(try sudo apt-get install doxygen grpahviz on Ubuntu and then rerun cmake\)" VERBATIM) +endif() + +add_subdirectory(plugins/refcount_checker EXCLUDE_FROM_ALL) + + +# CPack config. I think this needs to come after any other code, since it will +# look at whatever install targets have already been set up. 
+ +set(CPACK_GENERATOR "TGZ") + +set(CMAKE_EXECUTABLE_FORMAT "ELF") # Otherwise cmake thinks this is a cross-compile +install(TARGETS pyston DESTINATION ".") + +set(CPACK_PACKAGE_VERSION_MAJOR "0") +set(CPACK_PACKAGE_VERSION_MINOR "4") +set(CPACK_PACKAGE_VERSION_PATCH "0") + +set(CPACK_SYSTEM_NAME "linux64") + +set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE) +set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_SOURCE_DIR}/README.md) + +install(FILES LICENSE README.md DESTINATION ".") + +install(FILES test/lib/virtualenv/virtualenv.py DESTINATION virtualenv) +install(FILES test/lib/virtualenv/virtualenv_support/pip-6.0.8-py2.py3-none-any.whl DESTINATION virtualenv/virtualenv_support) +install(FILES test/lib/virtualenv/virtualenv_support/setuptools-12.0.5-py2.py3-none-any.whl DESTINATION virtualenv/virtualenv_support) + +include(CPack) + + +# last file added (need to change this if we add a file that is added via a glob): +# from_cpython/Lib/test/test_zipimport.py diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..7e22b1347 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,52 @@ +## Contributing to Pyston + +### Pull Requests + +Before a pull request can be merged, you need to to sign the [Dropbox Contributor License Agreement](https://opensource.dropbox.com/cla/). + +Please make sure to run at least the basic tests (`make quick_check`) on your changes. Travis CI will run the full test suite (`make check`) on each pull request. + +##### Formatting + +Please make sure `make check_format` passes on your commits. If it reports any issues, you can run `make format` to auto-apply the project formatting rules. Note that this will format your files in-place with no built-in undo, so you may want to create a temporary commit if you have any unstaged changes. + +Adding a pre-commit hook can be useful to catch formatting errors earlier. i.e. 
have in `~/pyston/.git/hooks/pre-commit`: + +``` +#!/bin/sh +exec make -C ~/pyston check_format +``` + +### Getting started with the codebase + +The easiest way to contribute to Pyston is to help us improve our compatibility with CPython. There are many small tasks to do such as built-in functions that are not yet implemented, edge cases that are not being handled by Pyston or where our output is slightly different than CPython, etc. The fix will often involve a local change, giving a smooth start to learning the codebase. One of Python's greatest strengths is that it comes ["batteries included"](https://xkcd.com/353/), but this means that there is a long long tail of these little tasks that needs to be driven through - your help is immensely valuable there! + +The command `make quick_check` will first run our Pyston tests (great way to make sure everything is in order) and then the default CPython tests. You will get an output that looks like this: + +``` + ... + test_bastion.py Correct output (125.7ms) + test_unittest.py (skipped: expected to fail) + test_json.py (skipped: expected to fail) + test_future3.py Correct output (952.8ms) + ... +``` + +Notice that a large number tests are currently marked as failing (marked with an `# expected: fail` comment at the top of the file). Just pick any that you think is interesting and get it to pass! Remove the `#expected: fail` flag and run `make check_TESTNAME` (without `.py`) to compare the result to CPython's (the command will search for TESTNAME in the `test/` directory). If the test is crashing, the easiest way to start debugging is to use `make dbg_TESTNAME` which is essentially `make check_TESTNAME` inside GDB. + +This kind of work will often happen where native libraries are defined (e.g. `src/runtime/builtin_modules/builtins.cpp`), implementation of types (e.g. `src/runtime/str.cpp`) and debugging may involve tracing through the interpreter (`src/codegen/ast_interpreter.cpp`). 
The code in those files should be relatively straightforward. Code that involve the JIT (function rewriting, assembly generation, etc) is more intricate and confusing in the beginning (e.g. `src/asm_writing/rewriter.cpp`). Keep in mind, it's perfectly fine to ask for help! + +To save you some time, the cause of failures for some of the tests [may have already been identified](test/CPYTHON_TEST_NOTES.md). Do note, however, that not all of CPython's behavior can be matched exactly. For example, by nature of having a garbage collector over reference counting, the freeing of objects is non-deterministic and we can't necessarily call object finalizers in the same order as CPython does. + +[Some tips on challenges you might run into and debugging tips](docs/TIPS.md). + +You can also check out our [Github issues list](https://github.com/dropbox/pyston/issues), especially those marked ["probably easy"](https://github.com/dropbox/pyston/labels/probably%20easy). + +### Communicating + +- We use a [gitter chat room](https://gitter.im/dropbox/pyston) for most of our discussions. If you need help with figuring out where to start or with the codebase, you should get a response there fairly quickly. If you found a small project to work on already and are eager to start, by all means get started! It is still a good idea to drop us a note - we might some suggestions or we can think of an edge case or two. +- Email the [pyston-dev mailing list](http://lists.pyston.org/cgi-bin/mailman/listinfo/pyston-dev), or [browse the archives](http://lists.pyston.org/pipermail/pyston-dev/) + +### Bigger projects + +There are many big areas where Pyston can use improvements. This includes, for example, a better garbage collector, better performance profiling tools (including finding more benchmarks), finding new libraries to add to our test suite, etc. These can be very involved - if you are interested bigger projects (e.g. as part of graduate studies), please contact us directly. 
diff --git a/LICENSE b/LICENSE index 6fa782020..dcf2a18f9 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,8 @@ -Copyright (c) 2014 Dropbox, Inc. +Unless otherwise specified (see below), the contents of this repository +are released under the following license: + + +Copyright (c) 2014-2016 Dropbox, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -11,3 +15,105 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + + +Some directories are distributed under different licenses, and contain +separate LICENSE files, which applies to all files in that directory +and its decendents, unless overridden by another LICENSE file. + + +Regardless of any separate LICENSE files, all code in this repository +falls under the following disclaimer: + Unless required by applicable law or agreed to in writing, software + distributed is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + + + +------ +CPython code + +Many files are copies, modified copies, or heavily-borrowed versions of CPython +source files, and will say so at the beginning of the file. Unless otherwise +specified, they come from CPython revision 90928:f8921f6059edf (2.7.7 release). +The original CPython code falls under the license reproduced below; the +modifications are subject to the main license stated above. + + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. 
Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are retained +in Python alone or in any derivative version prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. 
This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + +------ +LZ4 Library +Copyright (c) 2011-2014, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +------ + diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..d645b9d25 --- /dev/null +++ b/Makefile @@ -0,0 +1,1091 @@ +SHELL := /usr/bin/env sh + +# prints variables for debugging +print-%: ; @echo $($*) + +# Disable builtin rules: +.SUFFIXES: + +DEPS_DIR := $(HOME)/pyston_deps + +SRC_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) +BUILD_DIR := $(SRC_DIR)/build + +LLVM_REVISION_FILE := llvm_revision.txt +LLVM_REVISION := $(shell cat $(LLVM_REVISION_FILE)) + +USE_CLANG := 1 +USE_CCACHE := 1 +USE_DISTCC := 0 + +PYPY := pypy +CPYTHON := python + +ENABLE_VALGRIND := 0 + +GDB := gdb +# If you followed the old install instructions: +# GCC_DIR := $(DEPS_DIR)/gcc-4.8.2-install +GCC_DIR := /usr +GTEST_DIR := $(DEPS_DIR)/gtest-1.7.0 + +USE_DEBUG_LIBUNWIND := 0 + +MAX_MEM_KB := 500000 +MAX_DBG_MEM_KB := 500000 + +TEST_THREADS := 1 + +ERROR_LIMIT := 10 +COLOR := 1 + +SELF_HOST := 0 + +VERBOSE := 0 + +ENABLE_INTEL_JIT_EVENTS := 0 + +CTAGS := ctags +ETAGS := ctags-exuberant -e + +NINJA := ninja + +CMAKE_DIR_DBG := $(BUILD_DIR)/Debug +CMAKE_DIR_RELEASE := $(BUILD_DIR)/Release +CMAKE_DIR_GCC := $(BUILD_DIR)/Debug-gcc +CMAKE_DIR_RELEASE_GCC := $(BUILD_DIR)/Release-gcc +CMAKE_DIR_RELEASE_GCC_PGO := $(BUILD_DIR)/Release-gcc-pgo +CMAKE_DIR_RELEASE_GCC_PGO_INSTRUMENTED := $(BUILD_DIR)/Release-gcc-pgo-instrumented +CMAKE_SETUP_DBG := $(CMAKE_DIR_DBG)/build.ninja +CMAKE_SETUP_RELEASE := $(CMAKE_DIR_RELEASE)/build.ninja + +# Put any overrides in here: +-include Makefile.local + + +ifneq ($(SELF_HOST),1) + PYTHON := $(CPYTHON) + PYTHON_EXE_DEPS := +else + PYTHON := $(abspath ./pyston_dbg) + PYTHON_EXE_DEPS := pyston_dbg +endif + +TOOLS_DIR := ./tools +TEST_DIR := $(abspath ./test) +TESTS_DIR := $(abspath ./test/tests) + +GPP := $(GCC_DIR)/bin/g++ +GCC := $(GCC_DIR)/bin/gcc + +ifeq ($(V),1) + VERBOSE := 1 +endif +ifeq ($(VERBOSE),1) + VERB := + ECHO := @\# +else + VERB := @ + ECHO := @ echo pyston: +endif + +LLVM_TRUNK_SRC := $(DEPS_DIR)/llvm-trunk 
+LLVM_SRC := $(LLVM_TRUNK_SRC) +LLVM_INC_DBG := ./build/Debug/llvm + +LLVM_BIN := ./build/Release/llvm/bin +LLVM_BIN_DBG := ./build/Debug/llvm/bin + +LLVM_LINK_LIBS := core mcjit native bitreader bitwriter ipo irreader debuginfodwarf instrumentation +ifneq ($(ENABLE_INTEL_JIT_EVENTS),0) +LLVM_LINK_LIBS += inteljitevents +endif + +NEED_OLD_JIT := $(shell if [ $(LLVM_REVISION) -le 216982 ]; then echo 1; else echo 0; fi ) +ifeq ($(NEED_OLD_JIT),1) + LLVM_LINK_LIBS += jit +endif + +ifneq ($(wildcard /usr/local/include/llvm),) +# Global include files can screw up the build, since if llvm moves a header file, +# the compiler will silently fall back to the global one that still exists. +# These include files are persistent because llvm's "make uninstall" does *not* +# delete them if the uninstall command is run on a revision that didn't include +# those files. +# This could probably be handled (somehow blacklist this particular folder?), +# but for now just error out: +$(error "Error: global llvm include files detected") +endif + +CLANG_EXE := $(LLVM_BIN)/clang +CLANGPP_EXE := $(LLVM_BIN)/clang++ + +COMMON_CFLAGS := -g -Werror -Wreturn-type -Wall -Wno-sign-compare -Wno-unused -Isrc -Ifrom_cpython/Include -fno-omit-frame-pointer +COMMON_CFLAGS += -Wextra -Wno-sign-compare +COMMON_CFLAGS += -Wno-unused-parameter # should use the "unused" attribute +COMMON_CXXFLAGS := $(COMMON_CFLAGS) +COMMON_CXXFLAGS += -std=c++11 +COMMON_CXXFLAGS += -Woverloaded-virtual +COMMON_CXXFLAGS += -fexceptions -fno-rtti +COMMON_CXXFLAGS += -Wno-invalid-offsetof # allow the use of "offsetof", and we'll just have to make sure to only use it legally. 
+COMMON_CXXFLAGS += -DENABLE_INTEL_JIT_EVENTS=$(ENABLE_INTEL_JIT_EVENTS) +COMMON_CXXFLAGS += -I$(DEPS_DIR)/pypa-install/include +COMMON_CXXFLAGS += -I$(DEPS_DIR)/lz4-install/include + +ifeq ($(ENABLE_VALGRIND),0) + COMMON_CXXFLAGS += -DNVALGRIND + VALGRIND := false + CMAKE_VALGRIND := +else + COMMON_CXXFLAGS += -I$(DEPS_DIR)/valgrind-3.10.0/include + VALGRIND := VALGRIND_LIB=$(DEPS_DIR)/valgrind-3.10.0-install/lib/valgrind $(DEPS_DIR)/valgrind-3.10.0-install/bin/valgrind + CMAKE_VALGRIND := -DENABLE_VALGRIND=ON -DVALGRIND_DIR=$(DEPS_DIR)/valgrind-3.10.0-install/ +endif + +COMMON_CXXFLAGS += -DGITREV=$(shell git rev-parse HEAD | head -c 12) -DLLVMREV=$(LLVM_REVISION) + +# Use our "custom linker" that calls gold if available +COMMON_LDFLAGS := -B$(TOOLS_DIR)/build_system -L/usr/local/lib -lpthread -lm -lunwind -llzma -L$(DEPS_DIR)/gcc-4.8.2-install/lib64 -lreadline -lgmp -lssl -lcrypto -lsqlite3 +COMMON_LDFLAGS += $(DEPS_DIR)/pypa-install/lib/libpypa.a +COMMON_LDFLAGS += $(DEPS_DIR)/lz4-install/lib/liblz4.a + +# Conditionally add libtinfo if available - otherwise nothing will be added +COMMON_LDFLAGS += `pkg-config tinfo 2>/dev/null && pkg-config tinfo --libs || echo ""` + +# Make sure that we put all symbols in the dynamic symbol table so that MCJIT can load them; +# TODO should probably do the linking before MCJIT +COMMON_LDFLAGS += -Wl,-E + +# We get multiple shared libraries (libstdc++, libgcc_s) from the gcc installation: +ifneq ($(GCC_DIR),/usr) + COMMON_LDFLAGS += -Wl,-rpath $(GCC_DIR)/lib64 +endif + +ifneq ($(USE_DEBUG_LIBUNWIND),0) + COMMON_LDFLAGS += -L$(DEPS_DIR)/libunwind-trunk-debug-install/lib + + # libunwind's include files warn on -Wextern-c-compat, so turn that off; + # ideally would just turn it off for header files in libunwind, maybe by + # having an internal libunwind.h that pushed/popped the diagnostic state, + # but it doesn't seem like that important a warning so just turn it off. 
+ COMMON_CXXFLAGS += -I$(DEPS_DIR)/libunwind-trunk-debug-install/include -Wno-extern-c-compat +else + COMMON_LDFLAGS += -L$(DEPS_DIR)/libunwind-trunk-install/lib + COMMON_CXXFLAGS += -I$(DEPS_DIR)/libunwind-trunk-install/include -Wno-extern-c-compat +endif + + +EXTRA_CXXFLAGS ?= +CXXFLAGS_DBG := $(LLVM_CXXFLAGS) $(COMMON_CXXFLAGS) -O0 -DBINARY_SUFFIX= -DBINARY_STRIPPED_SUFFIX=_stripped $(EXTRA_CXXFLAGS) +CXXFLAGS_PROFILE = $(LLVM_PROFILE_CXXFLAGS) $(COMMON_CXXFLAGS) -pg -O3 -DNDEBUG -DNVALGRIND -DBINARY_SUFFIX=_release -DBINARY_STRIPPED_SUFFIX= -fno-function-sections $(EXTRA_CXXFLAGS) +CXXFLAGS_RELEASE := $(LLVM_RELEASE_CXXFLAGS) $(COMMON_CXXFLAGS) -O3 -fstrict-aliasing -DNDEBUG -DNVALGRIND -DBINARY_SUFFIX=_release -DBINARY_STRIPPED_SUFFIX= $(EXTRA_CXXFLAGS) + +LDFLAGS := $(LLVM_LDFLAGS) $(COMMON_LDFLAGS) +LDFLAGS_DEBUG := $(LLVM_DEBUG_LDFLAGS) $(COMMON_LDFLAGS) +LDFLAGS_PROFILE = $(LLVM_PROFILE_LDFLAGS) -pg $(COMMON_LDFLAGS) +LDFLAGS_RELEASE := $(LLVM_RELEASE_LDFLAGS) $(COMMON_LDFLAGS) +# Can't add this, because there are functions in the compiler that look unused but are hooked back from the runtime: +# LDFLAGS_RELEASE += -Wl,--gc-sections + + +BUILD_SYSTEM_DEPS := Makefile Makefile.local $(wildcard build_system/*) +CLANG_DEPS := $(CLANGPP_EXE) + +# settings to make clang and ccache play nicely: +CLANG_CCACHE_FLAGS := -Qunused-arguments +CLANG_EXTRA_FLAGS := -enable-tbaa -ferror-limit=$(ERROR_LIMIT) $(CLANG_CCACHE_FLAGS) +ifeq ($(COLOR),1) + CLANG_EXTRA_FLAGS += -fcolor-diagnostics +else + CLANG_EXTRA_FLAGS += -fno-color-diagnostics +endif +CLANGFLAGS := $(CXXFLAGS_DBG) $(CLANG_EXTRA_FLAGS) +CLANGFLAGS_RELEASE := $(CXXFLAGS_RELEASE) $(CLANG_EXTRA_FLAGS) + +EXT_CFLAGS := $(COMMON_CFLAGS) -fPIC -Wimplicit -O2 -Ifrom_cpython/Include +EXT_CFLAGS += -Wno-missing-field-initializers +EXT_CFLAGS += -Wno-tautological-compare -Wno-type-limits -Wno-strict-aliasing +EXT_CFLAGS_PROFILE := $(EXT_CFLAGS) -pg +ifneq ($(USE_CLANG),0) + EXT_CFLAGS += $(CLANG_EXTRA_FLAGS) +endif + 
+# Extra flags to enable soon: +CLANGFLAGS += -Wno-sign-conversion -Wnon-virtual-dtor -Winit-self -Wimplicit-int -Wmissing-include-dirs -Wstrict-overflow=5 -Wundef -Wpointer-arith -Wtype-limits -Wwrite-strings -Wempty-body -Waggregate-return -Wstrict-prototypes -Wold-style-definition -Wmissing-field-initializers -Wredundant-decls -Wnested-externs -Winline -Wint-to-pointer-cast -Wpointer-to-int-cast -Wlong-long -Wvla +# Want this one but there's a lot of places that I haven't followed it: +# CLANGFLAGS += -Wold-style-cast +# llvm headers fail on this one: +# CLANGFLAGS += -Wswitch-enum +# Not sure about these: +# CLANGFLAGS += -Wbad-function-cast -Wcast-qual -Wcast-align -Wmissing-prototypes -Wunreachable-code -Wfloat-equal -Wunused -Wunused-variable +# Or everything: +# CLANGFLAGS += -Weverything -Wno-c++98-compat-pedantic -Wno-shadow -Wno-padded -Wno-zero-length-array + +CXX := $(GPP) +CC := $(GCC) +CXX_PROFILE := $(GPP) +CC_PROFILE := $(GCC) +CLANG_CXX := $(CLANGPP_EXE) + +ifneq ($(USE_CLANG),0) + CXX := $(CLANG_CXX) + CC := $(CLANG_EXE) + + CXXFLAGS_DBG := $(CLANGFLAGS) + CXXFLAGS_RELEASE := $(CLANGFLAGS_RELEASE) + + BUILD_SYSTEM_DEPS := $(BUILD_SYSTEM_DEPS) $(CLANG_DEPS) +endif + +ifeq ($(USE_CCACHE),1) + CC := ccache $(CC) + CXX := ccache $(CXX) + CXX_PROFILE := ccache $(CXX_PROFILE) + CLANG_CXX := ccache $(CLANG_CXX) + CXX_ENV += CCACHE_CPP2=yes + CC_ENV += CCACHE_CPP2=yes + ifeq ($(USE_DISTCC),1) + CXX_ENV += CCACHE_PREFIX=distcc + endif +else ifeq ($(USE_DISTCC),1) + CXX := distcc $(CXX) + CXX_PROFILE := distcc $(CXX_PROFILE) + CLANG_CXX := distcc $(CLANG_CXX) +endif +CXX := $(CXX_ENV) $(CXX) +CXX_PROFILE := $(CXX_ENV) $(CXX_PROFILE) +CC := $(CC_ENV) $(CC) +CLANG_CXX := $(CXX_ENV) $(CLANG_CXX) + +BASE_SRCS := $(wildcard src/codegen/*.cpp) $(wildcard src/asm_writing/*.cpp) $(wildcard src/codegen/irgen/*.cpp) $(wildcard src/codegen/opt/*.cpp) $(wildcard src/analysis/*.cpp) $(wildcard src/core/*.cpp) src/codegen/profiling/profiling.cpp 
src/codegen/profiling/dumprof.cpp $(wildcard src/runtime/*.cpp) $(wildcard src/runtime/builtin_modules/*.cpp) $(wildcard src/gc/*.cpp) $(wildcard src/capi/*.cpp) +MAIN_SRCS := $(BASE_SRCS) src/jit.cpp +STDLIB_SRCS := $(wildcard src/runtime/inline/*.cpp) +SRCS := $(MAIN_SRCS) $(STDLIB_SRCS) +STDLIB_OBJS := stdlib.bc.o stdlib.stripped.bc.o +STDLIB_RELEASE_OBJS := stdlib.release.bc.o +ASM_SRCS := $(wildcard src/runtime/*.S) + +STDMODULE_SRCS := \ + errnomodule.c \ + shamodule.c \ + sha256module.c \ + sha512module.c \ + _math.c \ + mathmodule.c \ + md5.c \ + md5module.c \ + _randommodule.c \ + _sre.c \ + operator.c \ + binascii.c \ + pwdmodule.c \ + posixmodule.c \ + _struct.c \ + datetimemodule.c \ + _functoolsmodule.c \ + _collectionsmodule.c \ + itertoolsmodule.c \ + resource.c \ + signalmodule.c \ + selectmodule.c \ + fcntlmodule.c \ + threadmodule.c \ + timemodule.c \ + arraymodule.c \ + zlibmodule.c \ + _codecsmodule.c \ + socketmodule.c \ + unicodedata.c \ + _weakref.c \ + cStringIO.c \ + _io/bufferedio.c \ + _io/bytesio.c \ + _io/fileio.c \ + _io/iobase.c \ + _io/_iomodule.c \ + _io/stringio.c \ + _io/textio.c \ + zipimport.c \ + _csv.c \ + _ssl.c \ + getpath.c \ + _sqlite/cache.c \ + _sqlite/connection.c \ + _sqlite/cursor.c \ + _sqlite/microprotocols.c \ + _sqlite/module.c \ + _sqlite/prepare_protocol.c \ + _sqlite/row.c \ + _sqlite/statement.c \ + _sqlite/util.c \ + stropmodule.c \ + $(EXTRA_STDMODULE_SRCS) + +STDOBJECT_SRCS := \ + structseq.c \ + capsule.c \ + stringobject.c \ + exceptions.c \ + floatobject.c \ + unicodeobject.c \ + unicodectype.c \ + bytearrayobject.c \ + bytes_methods.c \ + weakrefobject.c \ + memoryobject.c \ + iterobject.c \ + bufferobject.c \ + cobject.c \ + dictproxy.c \ + $(EXTRA_STDOBJECT_SRCS) + +STDPYTHON_SRCS := \ + pyctype.c \ + getargs.c \ + formatter_string.c \ + pystrtod.c \ + dtoa.c \ + formatter_unicode.c \ + structmember.c \ + marshal.c \ + mystrtoul.c \ + $(EXTRA_STDPYTHON_SRCS) + +STDPARSER_SRCS := \ + myreadline.c + 
+FROM_CPYTHON_SRCS := $(addprefix from_cpython/Modules/,$(STDMODULE_SRCS)) $(addprefix from_cpython/Objects/,$(STDOBJECT_SRCS)) $(addprefix from_cpython/Python/,$(STDPYTHON_SRCS)) $(addprefix from_cpython/Parser/,$(STDPARSER_SRCS)) + +# The stdlib objects have slightly longer dependency chains, +# so put them first in the list: +OBJS := $(STDLIB_OBJS) $(SRCS:.cpp=.o) $(FROM_CPYTHON_SRCS:.c=.o) $(ASM_SRCS) +ASTPRINT_OBJS := $(STDLIB_OBJS) $(BASE_SRCS:.cpp=.o) $(FROM_CPYTHON_SRCS:.c=.o) $(ASM_SRCS) +PROFILE_OBJS := $(STDLIB_RELEASE_OBJS) $(MAIN_SRCS:.cpp=.prof.o) $(STDLIB_SRCS:.cpp=.release.o) $(FROM_CPYTHON_SRCS:.c=.prof.o) $(ASM_SRCS) +OPT_OBJS := $(STDLIB_RELEASE_OBJS) $(SRCS:.cpp=.release.o) $(FROM_CPYTHON_SRCS:.c=.release.o) $(ASM_SRCS) + +OPTIONAL_SRCS := src/codegen/profiling/oprofile.cpp src/codegen/profiling/pprof.cpp +TOOL_SRCS := $(wildcard $(TOOLS_DIR)/*.cpp) + +UNITTEST_DIR := $(TEST_DIR)/unittests +UNITTEST_SRCS := $(wildcard $(UNITTEST_DIR)/*.cpp) + +NONSTDLIB_SRCS := $(MAIN_SRCS) $(OPTIONAL_SRCS) $(TOOL_SRCS) $(UNITTEST_SRCS) + +.DEFAULT_GOAL := small_all + +# The set of dependencies (beyond the executable) required to do `make run_foo`. +# ext_pyston (building test/test_extension) is required even in cmake mode since +# we manually add test/test_extension to the path +RUN_DEPS := ext_pyston + +# The set of dependencies (beyond the executable) required to do `make check` / `make check_foo`. +# The tester bases all paths based on the executable, so in cmake mode we need to have cmake +# build all of the shared modules. 
+check-deps: + $(NINJA) -C $(CMAKE_DIR_DBG) check-deps + +.PHONY: small_all +small_all: pyston_dbg $(RUN_DEPS) + +.PHONY: all _all +# all: llvm + # @# have to do this in a recursive make so that dependency is enforced: + # $(MAKE) pyston_all +# all: pyston_dbg pyston_release pyston_oprof pyston_prof $(OPTIONAL_SRCS:.cpp=.o) ext_python ext_pyston +all: pyston_dbg pyston_release pyston_gcc unittests check-deps $(RUN_DEPS) + +ALL_HEADERS := $(wildcard src/*/*.h) $(wildcard src/*/*/*.h) $(wildcard from_cpython/Include/*.h) +tags: $(SRCS) $(OPTIONAL_SRCS) $(FROM_CPYTHON_SRCS) $(ALL_HEADERS) + $(ECHO) Calculating tags... + $(VERB) $(CTAGS) $^ + +TAGS: $(SRCS) $(OPTIONAL_SRCS) $(FROM_CPYTHON_SRCS) $(ALL_HEADERS) + $(ECHO) Calculating TAGS... + $(VERB) $(ETAGS) $^ + +NON_ENTRY_OBJS := $(filter-out src/jit.o,$(OBJS)) + +define add_unittest +$(eval \ +.PHONY: $1_unittest +$1_unittest: + $(NINJA) -C $(CMAKE_DIR_DBG) $1_unittest $(NINJAFLAGS) + ln -sf $(CMAKE_DIR_DBG)/$1_unittest . +dbg_$1_unittests: $1_unittest + zsh -c 'ulimit -m $(MAX_MEM_KB); time $(GDB) $(GDB_CMDS) --args ./$1_unittest --gtest_break_on_failure $(ARGS)' +unittests:: $1_unittest +run_$1_unittests: $1_unittest + zsh -c 'ulimit -m $(MAX_MEM_KB); time ./$1_unittest $(ARGS)' +run_unittests:: run_$1_unittests +) +endef + +GDB_CMDS := $(GDB_PRE_CMDS) --ex "set confirm off" --ex "handle SIGUSR2 pass nostop noprint" --ex run --ex "bt 20" $(GDB_POST_CMDS) +BR ?= +ARGS ?= +ifneq ($(BR),) + override GDB_CMDS := --ex "break $(BR)" $(GDB_CMDS) +endif +$(call add_unittest,gc) +$(call add_unittest,analysis) + + +define checksha + test "$$($1 | sha1sum)" = "$2 -" +endef + +.PHONY: analyze +analyze: + $(MAKE) clean + PATH=$$PATH:$(DEPS_DIR)/llvm-trunk/tools/clang/tools/scan-view $(DEPS_DIR)/llvm-trunk/tools/clang/tools/scan-build/scan-build \ + --use-analyzer $(CLANGPP_EXE) --use-c++ $(CLANGPP_EXE) -V \ + $(MAKE) pyston_dbg USE_DISTCC=0 USE_CCACHE=0 + +.PHONY: lint cpplint +lint: $(PYTHON_EXE_DEPS) + $(ECHO) linting... 
+ $(VERB) cd src && $(PYTHON) ../tools/lint.py +cpplint: + $(VERB) $(PYTHON) $(TOOLS_DIR)/cpplint.py --filter=-whitespace,-build/header_guard,-build/include_order,-readability/todo $(SRCS) + +.PHONY: check +check: + @# These are ordered roughly in decreasing order of (chance will expose issue) / (time to run test) + + $(MAKE) pyston_dbg check-deps + ( cd $(CMAKE_DIR_DBG) && ctest -V ) + + $(MAKE) pyston_release + ( cd $(CMAKE_DIR_RELEASE) && ctest -V -R pyston ) + + echo "All tests passed" + +# A stripped down set of tests, meant as a quick smoke test to run before submitting a PR and having +# Travis-CI do the full test. +.PHONY: quick_check +quick_check: + $(MAKE) pyston_dbg + $(MAKE) check-deps + ( cd $(CMAKE_DIR_DBG) && ctest -V -R '^(check-format|unittests|pyston_defaults_tests|pyston_defaults_cpython)$$' ) + + +Makefile.local: + echo "Creating default Makefile.local" + which ninja-build >/dev/null && echo "NINJA := ninja-build" >> Makefile.local + +llvm_up: + $(CPYTHON) $(TOOLS_DIR)/git_svn_gotorev.py $(LLVM_SRC) $(LLVM_REVISION) ./llvm_patches + $(CPYTHON) $(TOOLS_DIR)/git_svn_gotorev.py $(LLVM_SRC)/tools/clang $(LLVM_REVISION) ./clang_patches + +## TOOLS: + +$(TOOLS_DIR)/demangle: $(TOOLS_DIR)/demangle.cpp $(BUILD_SYSTEM_DEPS) + $(CXX) $< -o $@ +.PHONY: demangle +demangle: $(TOOLS_DIR)/demangle + $(TOOLS_DIR)/demangle $(ARGS) + +$(TOOLS_DIR)/mcjitcache: $(TOOLS_DIR)/mcjitcache.o $(LLVM_DEPS) $(BUILD_SYSTEM_DEPS) + $(CXX) $< $(LDFLAGS) -o $@ +# Build a version of mcjitcache off the llvm_release repo mostly to avoid a dependence of the opt builds +# on the llvm_quick build. 
+$(TOOLS_DIR)/mcjitcache_release: $(TOOLS_DIR)/mcjitcache.release.o $(LLVM_RELEASE_DEPS) $(BUILD_SYSTEM_DEPS) + $(CXX) $< $(LDFLAGS_RELEASE) -o $@ + +$(TOOLS_DIR)/publicize: $(TOOLS_DIR)/publicize.o $(LLVM_DEPS) $(BUILD_SYSTEM_DEPS) + $(ECHO) Linking $(TOOLS_DIR)/publicize + $(VERB) $(CXX) $< $(LDFLAGS) -o $@ -lLLVMBitWriter + +$(TOOLS_DIR)/publicize_release: $(TOOLS_DIR)/publicize.release.o $(LLVM_RELEASE_DEPS) $(BUILD_SYSTEM_DEPS) + $(ECHO) Linking $(TOOLS_DIR)/publicize_release + $(VERB) $(CXX) $< $(LDFLAGS_RELEASE) -o $@ -lLLVMBitWriter + +$(TOOLS_DIR)/astprint: $(TOOLS_DIR)/astprint.cpp $(BUILD_SYSTEM_DEPS) $(LLVM_DEPS) $(ASTPRINT_OBJS) + $(ECHO) Linking $(TOOLS_DIR)/astprint + $(VERB) $(CXX) $< -o $@ $(LLVM_LIB_DEPS) $(ASTPRINT_OBJS) $(LDFLAGS) $(STDLIB_SRCS:.cpp=.o) $(CXXFLAGS_DBG) + +.PHONY: astprint astcompare + +astprint: $(TOOLS_DIR)/astprint + +astcompare: astprint + $(ECHO) Running libpypa vs CPython AST result comparison test + $(TOOLS_DIR)/astprint_test.sh && echo "Success" || echo "Failure" + +## END OF TOOLS + + +CODEGEN_SRCS := $(wildcard src/codegen/*.cpp) $(wildcard src/codegen/*/*.cpp) + +# args: suffix (ex: ".release"), CXXFLAGS +define make_compile_config +$(eval \ +$$(NONSTDLIB_SRCS:.cpp=$1.o): CXXFLAGS:=$2 +$$(SRCS:.cpp=$1.o.bc): CXXFLAGS:=$2 + +## Need to set CXXFLAGS so that this rule doesn't inherit the '-include' rule from the +## thing that's calling it. At the same time, also filter out "-DGITREV=foo". 
+%$1.h.pch: CXXFLAGS:=$(filter-out -DGITREV%,$(2)) +%$1.h.pch: %.h $$(BUILD_SYSTEM_DEPS) + $$(ECHO) Compiling $$@ + $$(VERB) rm -f $$@-* + $$(VERB) $$(CLANGPP_EXE) $$(CXXFLAGS) -MMD -MP -MF $$(patsubst %.pch,%.d,$$@) -x c++-header $$< -o $$@ +$$(CODEGEN_SRCS:.cpp=$1.o): CXXFLAGS += -include src/codegen/irgen$1.h +$$(CODEGEN_SRCS:.cpp=$1.o): src/codegen/irgen$1.h.pch + +$$(NONSTDLIB_SRCS:.cpp=$1.o): %$1.o: %.cpp $$(BUILD_SYSTEM_DEPS) + $$(ECHO) Compiling $$@ + $$(VERB) $$(CXX) $$(CXXFLAGS) -MMD -MP -MF $$(patsubst %.o,%.d,$$@) $$< -c -o $$@ + +# For STDLIB sources, first compile to bitcode: +$$(STDLIB_SRCS:.cpp=$1.o.bc): %$1.o.bc: %.cpp $$(BUILD_SYSTEM_DEPS) $$(CLANG_DEPS) + $$(ECHO) Compiling $$@ + $$(VERB) $$(CLANG_CXX) $$(CXXFLAGS) $$(CLANG_EXTRA_FLAGS) -MMD -MP -MF $$(patsubst %.bc,%.d,$$@) $$< -c -o $$@ -emit-llvm -g + +stdlib$1.unopt.bc: $$(STDLIB_SRCS:.cpp=$1.o.pub.bc) + $$(ECHO) Linking $$@ + $$(VERB) $$(LLVM_BIN)/llvm-link $$^ -o $$@ + +) +endef + +PASS_SRCS := codegen/opt/aa.cpp +PASS_OBJS := $(PASS_SRCS:.cpp=.standalone.o) + +%.o: %.cpp $(CMAKE_SETUP_DBG) + $(NINJA) -C $(CMAKE_DIR_DBG) src/CMakeFiles/PYSTON_OBJECTS.dir/$(patsubst src/%.o,%.cpp.o,$@) $(NINJAFLAGS) +%.release.o: %.cpp $(CMAKE_SETUP_RELEASE) + $(NINJA) -C $(CMAKE_DIR_RELEASE) src/CMakeFiles/PYSTON_OBJECTS.dir/$(patsubst src/%.release.o,%.cpp.o,$@) $(NINJAFLAGS) + +$(UNITTEST_SRCS:.cpp=.o): CXXFLAGS += -isystem $(GTEST_DIR)/include + +$(PASS_SRCS:.cpp=.standalone.o): %.standalone.o: %.cpp $(BUILD_SYSTEM_DEPS) + $(ECHO) Compiling $@ + $(VERB) $(CXX) $(CXXFLAGS_DBG) -DSTANDALONE -MMD -MP -MF $(patsubst %.o,%.d,$@) $< -c -o $@ +$(NONSTDLIB_SRCS:.cpp=.prof.o): %.prof.o: %.cpp $(BUILD_SYSTEM_DEPS) + $(ECHO) Compiling $@ + $(VERB) $(CXX_PROFILE) $(CXXFLAGS_PROFILE) -MMD -MP -MF $(patsubst %.o,%.d,$@) $< -c -o $@ + +# Then, publicize symbols: +%.pub.bc: %.bc $(TOOLS_DIR)/publicize + $(ECHO) Publicizing $< + $(VERB) $(TOOLS_DIR)/publicize $< -o $@ + +# Then, compile the publicized bitcode into normal 
.o files +%.o: %.o.pub.bc $(BUILD_SYSTEM_DEPS) $(CLANG_DEPS) + $(ECHO) Compiling bitcode to $@ + $(VERB) $(LLVM_BIN)/clang $(CLANGFLAGS) -O3 -c $< -o $@ + + + +passes.so: $(PASS_OBJS) $(BUILD_SYSTEM_DEPS) + $(CXX) -shared $(PASS_OBJS) -o $@ -shared -rdynamic +test_opt: passes.so + $(LLVM_BUILD)/Release+Asserts/bin/opt -load passes.so test.ll -S -o test.opt.ll $(ARGS) +test_dbg_opt: passes.so + $(GDB) --args $(LLVM_BUILD)/Release+Asserts/bin/opt -O3 -load passes.so test.ll -S -o test.opt.ll $(ARGS) + + +# Optimize and/or strip it: +stdlib.bc: OPT_OPTIONS=-O3 +stdlib.release.bc: OPT_OPTIONS=-O3 -strip-debug +.PRECIOUS: %.bc %.stripped.bc +%.bc: %.unopt.bc + $(ECHO) Optimizing $< -\> $@ + $(VERB) $(LLVM_BIN)/opt $(OPT_OPTIONS) $< -o $@ +%.stripped.bc: %.bc + $(ECHO) Stripping $< -\> $@ + $(VERB) $(LLVM_BIN)/opt -strip-debug $< -o $@ + +# Then do "ld -b binary" to create a .o file for embedding into the executable +# $(STDLIB_OBJS) $(STDLIB_RELEASE_OBJS): %.o: % $(BUILD_SYSTEM_DEPS) +stdli%.bc.o: stdli%.bc $(BUILD_SYSTEM_DEPS) + $(ECHO) Embedding $< + $(VERB) ld -r -b binary $< -o $@ + +# Optionally, disassemble the bitcode files: +%.ll: %.bc + $(LLVM_BIN)/llvm-dis $< + +# Not used currently, but here's how to pre-jit the stdlib bitcode: +%.release.cache: %.release.bc mcjitcache_release + ./mcjitcache_release -p $< -o $@ +%.cache: %.bc mcjitcache + ./mcjitcache -p $< -o $@ + +# args: output suffx (ex: _release), objects to link, LDFLAGS, other deps +define link +$(eval \ +pyston$1: $2 $4 + $$(ECHO) Linking $$@ + $$(VERB) $$(CXX) $2 $3 -o $$@ +) +endef + + +# Finally, link it all together: +$(call link,_grwl,stdlib.grwl.bc.o $(SRCS:.cpp=.grwl.o),$(LDFLAGS_RELEASE),$(LLVM_RELEASE_DEPS)) +$(call link,_grwl_dbg,stdlib.grwl_dbg.bc.o $(SRCS:.cpp=.grwl_dbg.o),$(LDFLAGS),$(LLVM_DEPS)) +$(call link,_nosync,stdlib.nosync.bc.o $(SRCS:.cpp=.nosync.o),$(LDFLAGS_RELEASE),$(LLVM_RELEASE_DEPS)) +pyston_oprof: $(OPT_OBJS) src/codegen/profiling/oprofile.o $(LLVM_DEPS) + $(ECHO) Linking 
$@ + $(VERB) $(CXX) $(OPT_OBJS) src/codegen/profiling/oprofile.o $(LDFLAGS_RELEASE) -lopagent -o $@ +pyston_pprof: $(OPT_OBJS) src/codegen/profiling/pprof.release.o $(LLVM_DEPS) + $(ECHO) Linking $@ + $(VERB) $(CXX) $(OPT_OBJS) src/codegen/profiling/pprof.release.o $(LDFLAGS_RELEASE) -lprofiler -o $@ +pyston_prof: $(PROFILE_OBJS) $(LLVM_DEPS) + $(ECHO) Linking $@ + $(VERB) $(CXX) $(PROFILE_OBJS) $(LDFLAGS) -pg -o $@ +pyston_profile: $(PROFILE_OBJS) $(LLVM_PROFILE_DEPS) + $(ECHO) Linking $@ + $(VERB) $(CXX) $(PROFILE_OBJS) $(LDFLAGS_PROFILE) -o $@ + +clang_check: + @clang --version >/dev/null || (echo "clang not available"; false) + +cmake_check: + @cmake --version >/dev/null || (echo "cmake not available"; false) + @$(NINJA) --version >/dev/null || (echo "ninja not available"; false) + +COMMON_CMAKE_OPTIONS := $(SRC_DIR) -DTEST_THREADS=$(TEST_THREADS) $(CMAKE_VALGRIND) -DCMAKE_EXPORT_COMPILE_COMMANDS=ON -GNinja + +.PHONY: cmake_check clang_check +$(CMAKE_SETUP_DBG): + @$(MAKE) cmake_check + @$(MAKE) clang_check + @mkdir -p $(CMAKE_DIR_DBG) + cd $(CMAKE_DIR_DBG); CC='clang' CXX='clang++' cmake $(COMMON_CMAKE_OPTIONS) -DCMAKE_BUILD_TYPE=Debug +$(CMAKE_SETUP_RELEASE): + @$(MAKE) cmake_check + @$(MAKE) clang_check + @mkdir -p $(CMAKE_DIR_RELEASE) + cd $(CMAKE_DIR_RELEASE); CC='clang' CXX='clang++' cmake $(COMMON_CMAKE_OPTIONS) -DCMAKE_BUILD_TYPE=Release + +# Shared modules (ie extension modules that get built using pyston on setup.py) that we will ask CMake +# to build. You can flip this off to allow builds to continue even if self-hosting the sharedmods would fail. 
+CMAKE_SHAREDMODS := sharedmods ext_pyston + +.PHONY: pyston_dbg pyston_release +pyston_dbg: $(CMAKE_SETUP_DBG) + $(NINJA) -C $(CMAKE_DIR_DBG) pyston copy_stdlib copy_libpyston $(CMAKE_SHAREDMODS) ext_cpython $(NINJAFLAGS) + ln -sf $(CMAKE_DIR_DBG)/pyston $@ +pyston_release: $(CMAKE_SETUP_RELEASE) + $(NINJA) -C $(CMAKE_DIR_RELEASE) pyston copy_stdlib copy_libpyston $(CMAKE_SHAREDMODS) ext_cpython $(NINJAFLAGS) + ln -sf $(CMAKE_DIR_RELEASE)/pyston $@ + +CMAKE_SETUP_GCC := $(CMAKE_DIR_GCC)/build.ninja +$(CMAKE_SETUP_GCC): + @$(MAKE) cmake_check + @mkdir -p $(CMAKE_DIR_GCC) + cd $(CMAKE_DIR_GCC); CC='$(GCC)' CXX='$(GPP)' cmake $(COMMON_CMAKE_OPTIONS) -DCMAKE_BUILD_TYPE=Debug +.PHONY: pyston_gcc +pyston_gcc: $(CMAKE_SETUP_GCC) + $(NINJA) -C $(CMAKE_DIR_GCC) pyston copy_stdlib copy_libpyston $(CMAKE_SHAREDMODS) ext_cpython $(NINJAFLAGS) + ln -sf $(CMAKE_DIR_GCC)/pyston $@ + +CMAKE_SETUP_RELEASE_GCC := $(CMAKE_DIR_RELEASE_GCC)/build.ninja +$(CMAKE_SETUP_RELEASE_GCC): + @$(MAKE) cmake_check + @mkdir -p $(CMAKE_DIR_RELEASE_GCC) + cd $(CMAKE_DIR_RELEASE_GCC); CC='$(GCC)' CXX='$(GPP)' cmake $(COMMON_CMAKE_OPTIONS) -DCMAKE_BUILD_TYPE=Release +.PHONY: pyston_release_gcc +pyston_release_gcc: $(CMAKE_SETUP_RELEASE_GCC) + $(NINJA) -C $(CMAKE_DIR_RELEASE_GCC) pyston copy_stdlib copy_libpyston $(CMAKE_SHAREDMODS) ext_cpython $(NINJAFLAGS) + ln -sf $(CMAKE_DIR_RELEASE_GCC)/pyston $@ + + +# GCC PGO build +CMAKE_SETUP_RELEASE_GCC_PGO := $(CMAKE_DIR_RELEASE_GCC_PGO)/build.ninja +$(CMAKE_SETUP_RELEASE_GCC_PGO): + @$(MAKE) cmake_check + @mkdir -p $(CMAKE_DIR_RELEASE_GCC_PGO) + cd $(CMAKE_DIR_RELEASE_GCC_PGO); CC='$(GCC)' CXX='$(GPP)' cmake $(COMMON_CMAKE_OPTIONS) -DCMAKE_BUILD_TYPE=Release -DENABLE_PGO=ON -DPROFILE_STATE=use +.PHONY: pyston_release_gcc_pgo +pyston_release_gcc_pgo: $(CMAKE_SETUP_RELEASE_GCC_PGO) $(CMAKE_DIR_RELEASE_GCC_PGO)/.trained + $(NINJA) -C $(CMAKE_DIR_RELEASE_GCC_PGO) pyston copy_stdlib copy_libpyston $(CMAKE_SHAREDMODS) ext_cpython $(NINJAFLAGS) + ln -sf 
$(CMAKE_DIR_RELEASE_GCC_PGO)/pyston $@ + +CMAKE_SETUP_RELEASE_GCC_PGO_INSTRUMENTED := $(CMAKE_DIR_RELEASE_GCC_PGO_INSTRUMENTED)/build.ninja +$(CMAKE_SETUP_RELEASE_GCC_PGO_INSTRUMENTED): + @$(MAKE) cmake_check + @mkdir -p $(CMAKE_DIR_RELEASE_GCC_PGO_INSTRUMENTED) + cd $(CMAKE_DIR_RELEASE_GCC_PGO_INSTRUMENTED); CC='$(GCC)' CXX='$(GPP)' cmake $(COMMON_CMAKE_OPTIONS) -DCMAKE_BUILD_TYPE=Release -DENABLE_PGO=ON -DPROFILE_STATE=generate -DPROFILE_DIR=$(CMAKE_DIR_RELEASE_GCC_PGO) + +.PHONY: pyston_release_gcc_pgo_instrumented +pyston_release_gcc_pgo_instrumented: $(CMAKE_SETUP_RELEASE_GCC_PGO_INSTRUMENTED) + $(NINJA) -C $(CMAKE_DIR_RELEASE_GCC_PGO_INSTRUMENTED) pyston copy_stdlib copy_libpyston $(CMAKE_SHAREDMODS) ext_cpython $(NINJAFLAGS) + ln -sf $(CMAKE_DIR_RELEASE_GCC_PGO_INSTRUMENTED)/pyston $@ + +PROFILE_TARGET := ./pyston $(SRC_DIR)/minibenchmarks/combined.py + +$(CMAKE_DIR_RELEASE_GCC_PGO)/.trained: pyston_release_gcc_pgo_instrumented + @echo "Training pgo" + mkdir -p $(CMAKE_DIR_RELEASE_GCC_PGO) + (cd $(CMAKE_DIR_RELEASE_GCC_PGO_INSTRUMENTED) && $(PROFILE_TARGET) && $(PROFILE_TARGET) ) && touch $(CMAKE_DIR_RELEASE_GCC_PGO)/.trained + +pyston_pgo: pyston_release_gcc_pgo + ln -sf $< $@ + +.PHONY: format check_format +format: $(CMAKE_SETUP_RELEASE) + $(NINJA) -C $(CMAKE_DIR_RELEASE) format +check_format: $(CMAKE_SETUP_RELEASE) + $(NINJA) -C $(CMAKE_DIR_RELEASE) check-format + +-include $(wildcard src/*.d) $(wildcard src/*/*.d) $(wildcard src/*/*/*.d) $(wildcard $(UNITTEST_DIR)/*.d) $(wildcard from_cpython/*/*.d) $(wildcard from_cpython/*/*/*.d) + +.PHONY: clean +clean: + @ find src $(TOOLS_DIR) $(TEST_DIR) ./from_cpython ./lib_pyston \( -name '*.o' -o -name '*.d' -o -name '*.py_cache' -o -name '*.bc' -o -name '*.o.ll' -o -name '*.pub.ll' -o -name '*.cache' -o -name 'stdlib*.ll' -o -name '*.pyc' -o -name '*.so' -o -name '*.a' -o -name '*.expected_cache' -o -name '*.pch' \) -print -delete + @ find $(BUILD_DIR) \( -name 'pyston*' -executable -type f \) -print -delete + 
@ rm -vf pyston_dbg pyston_release pyston_gcc + @ find $(TOOLS_DIR) -maxdepth 0 -executable -type f -print -delete + @ rm -rf oprofile_data + @ rm -f *_unittest + +# A helper function that lets me run subdirectory rules from the top level; +# ex instead of saying "make tests/run_1", I can just write "make run_1" +# +# The target to ultimately be called must be prefixed with nosearch_, for example: +# nosearch_example_%: %.py +# echo $^ +# $(call make_search,example_%) +# This prevents us from searching recursively, which can result in a combinatorial explosion. +define make_search +$(eval \ +.PHONY: $1 nosearch_$1 +$1: nosearch_$1 +$1: $(TESTS_DIR)/nosearch_$1 ; +$1: $(TEST_DIR)/cpython/nosearch_$1 ; +$1: $(TEST_DIR)/integration/nosearch_$1 ; +$1: $(TEST_DIR)/extra/nosearch_$1 ; +$1: ./microbenchmarks/nosearch_$1 ; +$1: ./minibenchmarks/nosearch_$1 ; +$1: ./benchmarks/nosearch_$1 ; +$1: $(HOME)/pyston-perf/benchmarking/benchmark_suite/nosearch_$1 ; +$(patsubst %, $$1: %/nosearch_$$1 ;,$(EXTRA_SEARCH_DIRS)) +) +endef + +define make_target +$(eval \ +.PHONY: test$1 check$1 +check$1 test$1: $(PYTHON_EXE_DEPS) pyston$1 + $(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k $(TESTS_DIR) $(ARGS) + @# we pass -I to cpython tests and skip failing ones because they are sloooow otherwise + $(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-S -k --exit-code-only --skip-failing -t50 $(TEST_DIR)/cpython $(ARGS) + $(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -k -a=-S --exit-code-only --skip-failing -t600 $(TEST_DIR)/integration $(ARGS) + $(PYTHON) $(TOOLS_DIR)/tester.py -a=-X -R pyston$1 -j$(TEST_THREADS) -a=-n -a=-S -k $(TESTS_DIR) $(ARGS) + $(PYTHON) $(TOOLS_DIR)/tester.py -R pyston$1 -j$(TEST_THREADS) -a=-O -a=-S -k $(TESTS_DIR) $(ARGS) + +.PHONY: run$1 dbg$1 +run$1: pyston$1 $$(RUN_DEPS) + PYTHONPATH=test/test_extension:$${PYTHONPATH} ./pyston$1 $$(ARGS) +dbg$1: pyston$1 $$(RUN_DEPS) + 
PYTHONPATH=test/test_extension:$${PYTHONPATH} zsh -c 'ulimit -m $$(MAX_DBG_MEM_KB); $$(GDB) $$(GDB_CMDS) --args ./pyston$1 $$(ARGS)' +nosearch_run$1_%: %.py pyston$1 $$(RUN_DEPS) + $(VERB) PYTHONPATH=test/test_extension:$${PYTHONPATH} zsh -c 'ulimit -m $$(MAX_MEM_KB); time ./pyston$1 $$(ARGS) $$<' +$$(call make_search,run$1_%) +nosearch_dbg$1_%: %.py pyston$1 $$(RUN_DEPS) + $(VERB) PYTHONPATH=test/test_extension:$${PYTHONPATH} zsh -c 'ulimit -m $$(MAX_DBG_MEM_KB); $$(GDB) $$(GDB_CMDS) --args ./pyston$1 $$(ARGS) $$<' +$$(call make_search,dbg$1_%) + +ifneq ($$(ENABLE_VALGRIND),0) +nosearch_memcheck$1_%: %.py pyston$1 $$(RUN_DEPS) + PYTHONPATH=test/test_extension:$${PYTHONPATH} $$(VALGRIND) --tool=memcheck --leak-check=no --db-attach=yes ./pyston$1 $$(ARGS) $$< +$$(call make_search,memcheck$1_%) +nosearch_memcheck_gdb$1_%: %.py pyston$1 $$(RUN_DEPS) + set +e; PYTHONPATH=test/test_extension:$${PYTHONPATH} $$(VALGRIND) -v -v -v -v -v --tool=memcheck --leak-check=no --track-origins=yes --vgdb=yes --vgdb-error=0 ./pyston$1 $$(ARGS) $$< & export PID=$$$$! 
; \ + $$(GDB) --ex "set confirm off" --ex "target remote | $$(DEPS_DIR)/valgrind-3.10.0-install/bin/vgdb" --ex "continue" --ex "bt" ./pyston$1; kill -9 $$$$PID +$$(call make_search,memcheck_gdb$1_%) +nosearch_memleaks$1_%: %.py pyston$1 $$(RUN_DEPS) + PYTHONPATH=test/test_extension:$${PYTHONPATH} $$(VALGRIND) --tool=memcheck --leak-check=full --leak-resolution=low --show-reachable=yes ./pyston$1 $$(ARGS) $$< +$$(call make_search,memleaks$1_%) +nosearch_cachegrind$1_%: %.py pyston$1 $$(RUN_DEPS) + PYTHONPATH=test/test_extension:$${PYTHONPATH} $$(VALGRIND) --tool=cachegrind ./pyston$1 $$(ARGS) $$< +$$(call make_search,cachegrind$1_%) +endif + +.PHONY: perf$1_% +nosearch_perf$1_%: %.py pyston$1 + PYTHONPATH=test/test_extension:$${PYTHONPATH} perf record -g -- ./pyston$1 -q -p $$(ARGS) $$< + @$(MAKE) perf_report +$$(call make_search,perf$1_%) + +) +endef + +.PHONY: perf_report +perf_report: + perf report -n + +.PHONY: run run_% dbg_% debug_% perf_% +run: run_dbg +dbg: dbg_dbg +run_%: run_dbg_% + @true +dbg_%: dbg_dbg_% + @true +debug_%: dbg_debug_% + @true +perf_%: perf_release_% + @true + +$(call make_target,_dbg) +$(call make_target,_debug) +$(call make_target,_release) +# $(call make_target,_grwl) +# $(call make_target,_grwl_dbg) +# $(call make_target,_nosync) +$(call make_target,_prof) +$(call make_target,_gcc) +$(call make_target,_release_gcc) + +nosearch_runpy_% nosearch_pyrun_%: %.py ext_python + $(VERB) PYTHONPATH=test/test_extension/build/lib.linux-x86_64-2.7:$${PYTHONPATH} zsh -c 'time $(CPYTHON) $<' +nosearch_pypyrun_%: %.py ext_python + $(VERB) PYTHONPATH=test/test_extension/build/lib.linux-x86_64-2.7:$${PYTHONPATH} zsh -c 'time $(PYPY) $<' +$(call make_search,runpy_%) +$(call make_search,pyrun_%) +$(call make_search,pypyrun_%) + +nosearch_check_%: %.py pyston_dbg check-deps + $(MAKE) check_dbg ARGS="$(patsubst %.py,%,$(notdir $<)) -K" +$(call make_search,check_%) + +nosearch_dbgpy_% nosearch_pydbg_%: %.py ext_pythondbg + export 
PYTHON_VERSION=$$(python2.7-dbg -V 2>&1 | awk '{print $$2}'); PYTHONPATH=test/test_extension/build/lib.linux-x86_64-2.7-pydebug $(GDB) --ex "dir $(DEPS_DIR)/python-src/python2.7-$$PYTHON_VERSION/debian" $(GDB_CMDS) --args python2.7-dbg $< +$(call make_search,dbgpy_%) +$(call make_search,pydbg_%) + +pydbg: ext_pythondbg + export PYTHON_VERSION=$$(python2.7-dbg -V 2>&1 | awk '{print $$2}'); PYTHONPATH=test/test_extension/build/lib.linux-x86_64-2.7-pydebug $(GDB) --ex "dir $(DEPS_DIR)/python-src/python2.7-$$PYTHON_VERSION/debian" $(GDB_CMDS) --args python2.7-dbg + +# "kill valgrind": +kv: + ps aux | awk '/[v]algrind/ {print $$2}' | xargs kill -9; true + +# gprof-based profiling: +nosearch_prof_%: %.py pyston_prof + zsh -c 'time ./pyston_prof $(ARGS) $<' + gprof ./pyston_prof gmon.out > $(patsubst %,%.out,$@) +$(call make_search,prof_%) +nosearch_profile_%: %.py pyston_profile + time ./pyston_profile -p $(ARGS) $< + gprof ./pyston_profile gmon.out > $(patsubst %,%.out,$@) +$(call make_search,profile_%) + +# pprof-based profiling: +nosearch_pprof_%: %.py $(PYTHON_EXE_DEPS) pyston_pprof + CPUPROFILE_FREQUENCY=1000 CPUPROFILE=$@.out ./pyston_pprof -p $(ARGS) $< + pprof --raw pyston_pprof $@.out > $@_raw.out + $(PYTHON) codegen/profiling/process_pprof.py $@_raw.out pprof.jit > $@_processed.out + pprof --text $@_processed.out + # rm -f pprof.out pprof.raw pprof.jit +$(call make_search,pprof_%) + +# oprofile-based profiling: +.PHONY: oprof_collect_% opreport +oprof_collect_%: %.py pyston_oprof + sudo opcontrol --image pyston_oprof + # sudo opcontrol --event CPU_CLK_UNHALTED:28000 + # sudo opcontrol --cpu-buffer-size=128000 --buffer-size=1048576 --buffer-watershed=1048000 + sudo opcontrol --reset + sudo opcontrol --start + time ./pyston_oprof -p $(ARGS) $< + sudo opcontrol --stop + sudo opcontrol --dump + sudo opcontrol --image all --event default --cpu-buffer-size=0 --buffer-size=0 --buffer-watershed=0 + sudo opcontrol --deinit + sudo opcontrol --init +nosearch_oprof_%: 
oprof_collect_% + $(MAKE) opreport +$(call make_search,oprof_%) +opreport: + ! [ -d oprofile_data ] + opreport -l -t 0.2 -a pyston_oprof + # opreport lib-image:pyston_oprof -l -t 0.2 -a | head -n 25 + +.PHONY: oprof_collectcg_% opreportcg +oprof_collectcg_%: %.py pyston_oprof + operf -g -e CPU_CLK_UNHALTED:90000 ./pyston_oprof -p $(ARGS) $< +nosearch_oprofcg_%: oprof_collectcg_% + $(MAKE) opreportcg +$(call make_search,oprofcg_%) +opreportcg: + opreport lib-image:pyston_oprof -l -t 0.2 -a --callgraph + +.PHONY: watch_% watch wdbg_% +watch_%: + @ ( ulimit -t 60; ulimit -m $(MAK_MEM_KB); \ + TARGET=$(dir $@)$(patsubst watch_%,%,$(notdir $@)); \ + clear; $(MAKE) $$TARGET $(WATCH_ARGS); true; \ + while inotifywait -q -e modify -e attrib -e move -e move_self -e create -e delete -e delete_self \ + Makefile $$(find src test \( -name '*.cpp' -o -name '*.h' -o -name '*.py' \) ); do clear; \ + $(MAKE) $$TARGET $(WATCH_ARGS); \ + done ) + # Makefile $$(find \( -name '*.cpp' -o -name '*.h' -o -name '*.py' \) -o -type d ); do clear; $(MAKE) $(patsubst watch_%,%,$@); done ) + # -r . 
; do clear; $(MAKE) $(patsubst watch_%,%,$@); done +watch: watch_pyston_dbg +watch_vim: + $(MAKE) watch WATCH_ARGS='COLOR=0 USE_DISTCC=0 -j1 2>&1 | tee compile.log' +wdbg_%: + $(MAKE) $(patsubst wdbg_%,watch_dbg_%,$@) GDB_POST_CMDS="--ex quit" + +.PHONY: head_% +HEAD := 40 +head_%: + @ bash -c "set -o pipefail; script -e -q -c '$(MAKE) $(dir $@)$(patsubst head_%,%,$(notdir $@))' /dev/null | head -n$(HEAD)" +head: head_pyston_dbg +.PHONY: hwatch_% +hwatch_%: + @ $(MAKE) $(dir $@)$(patsubst hwatch_%,watch_head_%,$(notdir $@)) +hwatch: hwatch_pyston_dbg + +.PHONY: test_asm test_cpp_asm +test_asm: + $(CLANGPP_EXE) $(TEST_DIR)/test.s -c -o test_asm + objdump -d test_asm | less + @ rm test_asm +test_cpp_asm: + $(CLANGPP_EXE) $(TEST_DIR)/test.cpp -o test_asm -c -O3 -std=c++11 + # $(GPP) tests/test.cpp -o test_asm -c -O3 + objdump -d test_asm | less + rm test_asm +test_cpp_dwarf: + $(CLANGPP_EXE) $(TEST_DIR)/test.cpp -o test_asm -c -O3 -std=c++11 -g + # $(GPP) tests/test.cpp -o test_asm -c -O3 + objdump -W test_asm | less + rm test_asm +test_cpp_ll: + $(CLANGPP_EXE) $(TEST_DIR)/test.cpp -o test.ll -c -O3 -emit-llvm -S -std=c++11 + less test.ll + rm test.ll +.PHONY: bench_exceptions +bench_exceptions: + $(CLANGPP_EXE) $(TEST_DIR)/bench_exceptions.cpp -o bench_exceptions -O3 -std=c++11 + zsh -c 'ulimit -m $(MAX_MEM_KB); time ./bench_exceptions' + rm bench_exceptions + +TEST_EXT_MODULE_NAMES := basic_test descr_test slots_test +TEST_EXT_MODULE_SRCS := $(TEST_EXT_MODULE_NAMES:%=test/test_extension/%.c) +TEST_EXT_MODULE_OBJS := $(TEST_EXT_MODULE_NAMES:%=test/test_extension/%.pyston.so) + +SHAREDMODS_NAMES := _multiprocessing pyexpat future_builtins +SHAREDMODS_SRCS := \ + _multiprocessing/multiprocessing.c \ + _multiprocessing/semaphore.c \ + _multiprocessing/socket_connection.c \ + expat/xmlparse.c \ + expat/xmlrole.c \ + expat/xmltok.c \ + expat/xmltok_impl.c \ + expat/xmltok_ns.c \ + pyexpat.c \ + _elementtree.c\ + future_builtins.c +SHAREDMODS_SRCS := 
$(SHAREDMODS_SRCS:%=from_cpython/Modules/%) +SHAREDMODS_OBJS := $(SHAREDMODS_NAMES:%=lib_pyston/%.pyston.so) + +.PHONY: sharedmods +sharedmods: $(SHAREDMODS_OBJS) + +.PHONY: ext_pyston +ext_pyston: $(TEST_EXT_MODULE_OBJS) + +# Makefile hackery: we can build test extensions with any build configuration of pyston, +# so try to guess one that will end up being built anyway, and use that as the dependency. +ifneq ($(findstring release,$(MAKECMDGOALS))$(findstring perf,$(MAKECMDGOALS)),) +BUILD_PY:=pyston_release +else +BUILD_PY:=pyston_dbg +endif + +# Hax: we want to generate multiple targets from a single rule, and run the rule only if the +# dependencies have been updated, and only run it once for all the targets. +# So just tell make to generate the first extension module, and that the non-first ones just +# depend on the first one. +$(firstword $(TEST_EXT_MODULE_OBJS)): $(TEST_EXT_MODULE_SRCS) | $(BUILD_PY) + $(VERB) cd $(TEST_DIR)/test_extension; time ../../$(BUILD_PY) setup.py build + $(VERB) cd $(TEST_DIR)/test_extension; ln -sf $(TEST_EXT_MODULE_NAMES:%=build/lib.linux-x86_64-2.7/%.pyston.so) . 
+ $(VERB) touch -c $(TEST_EXT_MODULE_OBJS) +$(wordlist 2,9999,$(TEST_EXT_MODULE_OBJS)): $(firstword $(TEST_EXT_MODULE_OBJS)) +$(firstword $(SHAREDMODS_OBJS)): $(SHAREDMODS_SRCS) | $(BUILD_PY) + $(VERB) cd $(TEST_DIR)/test_extension; time ../../$(BUILD_PY) ../../from_cpython/setup.py build --build-lib ../../lib_pyston + $(VERB) touch -c $(SHAREDMODS_OBJS) +$(wordlist 2,9999,$(SHAREDMODS_OBJS)): $(firstword $(SHAREDMODS_OBJS)) + +.PHONY: ext_python ext_pythondbg +ext_python: $(TEST_EXT_MODULE_SRCS) + cd $(TEST_DIR)/test_extension; $(CPYTHON) setup.py build +ext_pythondbg: $(TEST_EXT_MODULE_SRCS) + cd $(TEST_DIR)/test_extension; python2.7-dbg setup.py build + +$(FROM_CPYTHON_SRCS:.c=.o): %.o: %.c $(BUILD_SYSTEM_DEPS) + $(ECHO) Compiling C file to $@ + $(VERB) $(CC) $(EXT_CFLAGS) -c $< -o $@ -g -MMD -MP -MF $(patsubst %.o,%.d,$@) -O0 + +$(FROM_CPYTHON_SRCS:.c=.o.ll): %.o.ll: %.c $(BUILD_SYSTEM_DEPS) + $(ECHO) Compiling C file to $@ + $(VERB) $(CLANG_EXE) $(EXT_CFLAGS) -S -emit-llvm -c $< -o $@ -g -MMD -MP -MF $(patsubst %.o,%.d,$@) -O3 -g0 + +$(FROM_CPYTHON_SRCS:.c=.release.o): %.release.o: %.c $(BUILD_SYSTEM_DEPS) + $(ECHO) Compiling C file to $@ + $(VERB) $(CC) $(EXT_CFLAGS) -c $< -o $@ -g -MMD -MP -MF $(patsubst %.o,%.d,$@) + +$(FROM_CPYTHON_SRCS:.c=.prof.o): %.prof.o: %.c $(BUILD_SYSTEM_DEPS) + $(ECHO) Compiling C file to $@ + $(VERB) $(CC_PROFILE) $(EXT_CFLAGS_PROFILE) -c $< -o $@ -g -MMD -MP -MF $(patsubst %.o,%.d,$@) + +.PHONY: update_section_ordering +update_section_ordering: pyston_release + perf record -o perf_section_ordering.data -- ./pyston_release -q minibenchmarks/combined.py + perf record -o perf_section_ordering.data -- ./pyston_release -q minibenchmarks/combined.py + $(MAKE) pyston_pgo + python tools/generate_section_ordering_from_pgo_build.py pyston_pgo perf_section_ordering.data > section_ordering.txt + rm perf_section_ordering.data + + + +# TESTING: + +PLUGINS := $(wildcard plugins/*.cpp) +$(patsubst %.cpp,%.o,$(PLUGINS)): plugins/%.o: 
plugins/%.cpp $(BUILD_SYSTEM_DEPS) + ninja -C $(CMAKE_DIR_DBG) llvm/bin/llvm-config clangASTMatchers clangTooling LLVMLTO LLVMDebugInfoPDB LLVMLineEditor LLVMInterpreter LLVMOrcJIT llvm/bin/clang + $(CMAKE_DIR_DBG)/llvm/bin/clang $< -o $@ -std=c++11 $(shell $(LLVM_BIN_DBG)/llvm-config --cxxflags) -fno-rtti -O0 -I$(LLVM_SRC)/tools/clang/include -I$(LLVM_INC_DBG)/tools/clang/include -c + +$(patsubst %.cpp,%.so,$(PLUGINS)): plugins/%.so: plugins/%.o + $(CMAKE_DIR_DBG)/llvm/bin/clang $< -o $@ -shared -lclangASTMatchers -lclangTooling $(shell $(LLVM_BIN_DBG)/llvm-config --ldflags) + # $(CXX) $< -o $@ -lclangASTMatchers -lclangRewrite -lclangFrontend -lclangDriver -lclangTooling -lclangParse -lclangSema -lclangAnalysis -lclangAST -lclangEdit -lclangLex -lclangBasic -lclangSerialization $(shell $(LLVM_BIN_DBG)/llvm-config --ldflags --system-libs --libs all) + +$(patsubst %.cpp,%,$(PLUGINS)): plugins/%: plugins/%.o $(BUILD_SYSTEM_DEPS) + $(CXX) $< -o $@ -lclangASTMatchers -lclangRewrite -lclangFrontend -lclangDriver -lclangTooling -lclangParse -lclangSema -lclangAnalysis -lclangAST -lclangEdit -lclangLex -lclangBasic -lclangSerialization $(shell $(LLVM_BIN_DBG)/llvm-config --ldflags --system-libs --libs all) + +.PHONY: tool_test +tool_test: plugins/clang_linter.o + plugins/clang_linter test/test.cpp -- $(shell $(LLVM_BIN_DBG)/llvm-config --cxxflags) -I/usr/lib/llvm-3.5/include + +.PHONY: superlint +superlint: plugins/clang_linter.so + for fn in $(MAIN_SRCS); do $(CLANG_CXX) -Xclang -load -Xclang plugins/clang_linter.so -Xclang -plugin -Xclang pyston-linter $$fn -c -Isrc/ -Ifrom_cpython/Include -Ibuild/Debug/from_cpython/Include $(shell $(LLVM_BIN_DBG)/llvm-config --cxxflags) -no-pedantic -Wno-unused-variable -DNVALGRIND -Wno-invalid-offsetof -Wno-mismatched-tags -Wno-unused-function -Wno-unused-private-field -Wno-sign-compare || break; done + +.PHONY: lint_% +lint_%: %.cpp plugins/clang_linter.so + $(ECHO) Linting $< + $(VERB) $(CLANG_CXX) -Xclang -load -Xclang 
plugins/clang_linter.so -Xclang -plugin -Xclang pyston-linter src/runtime/float.cpp $< -c -Isrc/ -Ifrom_cpython/Include -Ibuild/Debug/from_cpython/Include $(shell $(LLVM_BIN_DBG)/llvm-config --cxxflags) $(COMMON_CXXFLAGS) -no-pedantic -Wno-unused-variable -DNVALGRIND -Wno-invalid-offsetof -Wno-mismatched-tags -Wno-unused-function -Wno-unused-private-field -Wno-sign-compare -DLLVMREV=$(LLVM_REVISION) -Ibuild_deps/lz4/lib -DBINARY_SUFFIX= -DBINARY_STRIPPED_SUFFIX=_stripped -Ibuild_deps/libpypa/src/ -Wno-covered-switch-default -Ibuild/Debug/libunwind/include -Wno-extern-c-compat -Wno-unused-local-typedef -Wno-inconsistent-missing-override + +refcount_checker: + $(NINJA) -C $(CMAKE_DIR_DBG) refcount_checker + +.PHONY: clang_lint +clang_lint: $(foreach FN,$(MAIN_SRCS),$(dir $(FN))lint_$(notdir $(FN:.cpp=))) + +# 'make package' will build a package using the pgo build, since that's the +# configuration with the best performance. Testing that is a pain since it +# requires rerunning the pgo build, so there's also 'make package_nonpgo' mostly +# for testing. +package: pyston_pgo + $(NINJA) -C $(CMAKE_DIR_RELEASE_GCC_PGO) package + +package_nonpgo: + $(NINJA) -C $(CMAKE_DIR_RELEASE) package diff --git a/Makefile.llvmconfig b/Makefile.llvmconfig new file mode 100644 index 000000000..99c3214a5 --- /dev/null +++ b/Makefile.llvmconfig @@ -0,0 +1,3 @@ +# included by Makefile + +LLVM_CONFIGURE_LINE := CXX=$(GPP) LDFLAGS="-Wl,-rpath,$(GCC_DIR)/lib64" $(LLVM_SRC)/configure --enable-targets=host --with-gcc-toolchain=$(GCC_DIR) --disable-bindings diff --git a/README b/README deleted file mode 100644 index fbedccbcd..000000000 --- a/README +++ /dev/null @@ -1,184 +0,0 @@ -# Pyston - -Pyston is a new, under-development Python implementation built using LLVM and modern JIT techniques with the goal of achieving good performance. - -### Current state - -Pyston "works", though doesn't support very much of the Python language, and currently is not very useful for end-users. 
- -Pyston currently targets Python 2.7, and only runs on x86_64 platforms, and has only been tested on Ubuntu. - -Benchmarks are not currently that meaningful since the supported set of benchmarks is too small to be representative; with that caveat, Pyston seems to have better performance than CPython but lags behind PyPy. - -### Getting started - -To get a full development environment for Pyston, you need pretty recent versions of various tools, since self-modifying code tends to be less well supported. The docs/INSTALLING file contains information about what the tools are, how to get them, and how to install them; currently it can take up to an hour to get them all built on a quad-core machine. - -To simply build and run Pyston, a smaller set of dependencies is required; see docs/INSTALLING, but skip the "OPTIONAL DEPENDENCIES" section. Once all the dependencies are installed, you should be able to do -``` -$ make test -j4 -``` - -And see that hopefully all of the tests will pass. - -### Running Pyston - -Pyston builds in a few different configurations; right now there is "pyston_dbg", which is the debug configuration and contains assertions and debug symbols, and "pyston", the release configuration which has no assertions or debug symbols, and has full optimizations. You can build them by saying `make pyston_dbg` or `make pyston`, respectively. If you are interested in seeing how fast Pyston can go, you should try the release configuration, but there is a good chance that it will crash, in which case you can run the debug configuration to see what is happening. - -> There are a number of other configurations useful for development: "pyston_debug" contains full LLVM debug information, but can be over 100MB. 
"pyston_prof" contains gprof-style profiling instrumentation; gprof can't profile JIT'd code, reducing it's usefulness in this case, but the configuration has stuck around since it gets compiled with gcc, and can expose issues with the normal clang-based build. - -You can get a simple REPL by simply typing `./pyston`; it is not very robust right now, and only supports single-line statements, but can give you an interactive view into how Pyston works. To get more functionality, you can do `./pyston -i [your_source_file.py]`, which will go into the REPL after executing the given file, letting you access all the variables you had defined. - -#### Command-line options: -
-
-n
-
Disable the Pyston interpreter. The interpreter doesn't support certain features, such as inline caches, so disabling it can expose additional bugs.
- -
-O
-
Force Pyston to always run at the highest compilation tier. This doesn't always produce the fastest running time due to the lack of type recording from lower compilation tiers, but can help stress-test the code generator.
- -
-q
-
Set verbosity to 0
-
-v
-
Increase verbosity by 1
- -
Pyston by default runs at verbosity 1, which contains a good amount of debugging information. Verbosity 0 contains no debugging information (such as the LLVM IR generated) and should produce the same results as other runtimes.
- -
-d
-
In addition to showing the generated LLVM IR, show the generated assembly code.
- -
-i
-
Go into the repl after executing the given script.
- -
-b
-
Benchmark mode: do whatever it would have been done, but do it 1000 times.
- -
-p
-
Emit profiling information: at exit, Pyston will emit a dump of the code it generated for consumption by other tools.
- -
-r
-
Use a stripped stdlib. When running pyston_dbg, the default is to use a stdlib with full debugging symbols enabled. Passing -r changes this behavior to load a slimmer, stripped stdlib.
- -### Version History - -##### v0.1: 4/2/2014 - -Initial Release. -- Working system; proof of concept of major JIT techniques. -- Fairly promising initial performance, though not fully validated. -- Missing large parts of the language - - Exceptions (planned for 0.2) - - Class inheritance (planned for 0.2) - - Default arguments, keywords, starargs, kwargs (planned for 0.2) - - Generators (planned for 0.2) - - Integer promotion (planned for 0.2) - - Threads - ---- -## Technical features - -### Compilation tiers - -Pyston currently features four compilation tiers. In increasing order of speed, but also compilation time: -1. An LLVM-IR interpreter. LLVM IR is not designed for interpretation, and isn't very well suited for the task -- it is too low level, and the interpreter spends too much time dispatching for each instruction. The interpreter is currently used for the first three times that a function is called, or the first ten iterations of a loop, before switching to the next level. -2. Baseline LLVM compilation. Runs no LLVM optimizations, and no type speculation, and simply hands off the generated code to the LLVM code generator. This tier does type recording for the final tier. -3. Improved LLVM compilation. Behaves very similarly to baseline LLVM compilation, so this tier will probably be removed in the near future. -4. Full LLVM optimization + compilation. This tier runs full LLVM optimizations, and uses type feedback from lower tiers. This tier kicks in after 10000 loop iterations, or 10000 calls to a function. (exact numbers subject to change). - -There are two main ways that Pyston can move up to higher tiers: -- If a function gets called often, it will get recompiled at a higher tier and the new version will be called instead. -- If a loop gets iterated enough times, Pyston will OSR to a higher tier within the same function. - -Currently Pyston only moves to higher tiers, and doesn't move back down to lower tiers. 
This will be important to add, in order to support doing additional type recording if types change. - -The current plan is to replace the interpreter with a quick code generator that doesn't use LLVM's machinery; in theory it should be possible to build a simple code generator that just uses the LLVM IR as an input. - -#### OSR - -Pyston uses OSR (which stands for On-Stack Replacement, though Pyston does not use that particular mechanism) to move up to a higher tier while inside a function -- this can be important for functions that are expensive the very first time they are called. - -OSR is implemented in Pyston by keeping a count, per backedge, of the number of times that the backedge is taken. Once a certain threshold is reached (currently 10 for the interpreter, 10000 otherwise), Pyston will compile a special OSR-entry version of the function. This function takes as arguments all the local variables for that point in the program, and continues execution where the previous function left off. - -For example, this Python function: -```python -def square(n): - r = 0 - for i in xrange(n): - r += n -``` -Will get translated to something similar to: -```C -static _backedge_trip_count = 0; -int square(int n) { - int r = 0; - for (int i = 0; i < n; i++) { - r += n; - - // OSR exit here: - _backedge_trip_count++; - if (_backedge_trip_count >= 10000) { - auto osr_entry = compileOsrEntry(); - return osr_entry(n, i, r); - } - } - return r; -} -``` - -The compiled OSR entry will look something similar to: -```C -int square_osrentry(int n, int i, int r) { - for (; i < n; i++) { - r += n; - } - return r; -} -``` - -The pseudo-C shown above doesn't look that different; the benefit of this approach is that the square() function can be compiled at a low compilation tier, but the square_osrentry can be compiled at a higher one since the compilation time is much more likely to pay off. 
- -This approach seems to work, but has a couple drawbacks: -- It's currently tracked per backedge rather than per backedge-target, which can lead to more OSR compilations than necessary. -- The OSR'd version can be slower due to the optimizations having less context about the source of the arguments, ie that they're local variables that haven't escaped. - -### Inlining - -Pyston can inline functions from its runtime into the code that it's JIT'ing. This only happens if, at JIT time, it can guarantee the runtime function that would end up getting called, which typically happens if it is an attribute of a guaranteed type. For instance, `[].append()` will end up resolving to the internal listAppend(), since we know what the type of `[]` is. - -Once the Python-level call is resolved to a C-level call to a runtime function, normal inlining heuristics kick in to determine if it is profitable to inline the function. As a side note, the inlining is only possible because the LLVM IR for the runtime is not only compiled to machine code to be run, but also directly embedded as LLVM IR into the pyston binary, so that the LLVM IR can be inlined. - -### Object representation - -Current Pyston uses an 'everything is boxed' model. It has some ability to deal with unboxed variants of ints, floats, and bools, but those unboxed types are not mixable with boxed types. ie if you put an integer into a list, the integer will always get boxed first. - -### Inline caches - -### Hidden classes - -### Type feedback - -Currently, tiers 2 and 3 support *type recording*, and make a record of the types seen at specifically-designated parts of the program. 
- -Tier 4 then looks at the type record; the current heuristic is that if the same type has been seen 100 times in a row, the compiler will speculate - -### Garbage collection - -Pyston currently utilizes a *conservative* garbage collector -- this means that GC roots aren't tracked directly, but rather all GC-managed memory is scanned for values that could point into the GC heap, and treat those conservatively as pointers that keep the pointed-to GC memory alive. - -Currently, the Pyston's GC is a non-copying, non-generational, stop-the-world GC. ie it is a simple implementation that will need to be improved in the future. - -### Aspiration: Extension modules - -CPython-style C extension modules are difficult to support efficiently in alternative Python implementations, especially non-refcounted ones. One of the most prominant overheads is the conversion between a refcounted C API and a GC-managed runtime that has to implement it. - -My hope is that by applying a conservative GC to the extension modules, the refcounting code can be eliminated and the conservative GC will be able to find the roots. Whether or not this works, and is performant, remains to be seen. - -### Aspiration: Thread-level Parallelism - -Many runtimes for dynamic languages -- including CPython and PyPy -- use a Global Interpreter Lock (GIL) to protect internal structures against concurrent modification. This works, but has the drawback of only allowing one thread at a time to run. - -The number of cores you can obtain in a single machine keeps growing, which means the performance deficit of single-threaded programs is falling vs multi-threaded ones. There has been some work to support multi-process parallelism in Python, though many people prefer multi-threaded paralellism for its (relative) ease of use. - -We have no concrete ideas or plans for how to implement this, so this section is all optimistic, but our hope is that it will be possible to implement true parallelism. 
- -One of the biggest challenges for this is not just protecting the internal runtime structures, but also providing the higher-level guarantees that Python programmers have become accustomed to. One example is that all builtin datastructures must be thread-safe, since they currently are. A slightly more sinister one is that Python has a very straightforward memory model, where no operations can be viewed in different orders on different threads, because all thread switching involves a lock release-then-acquire which serializes the memory accesses; performantly maintaining this memory model is likely to be a challenge. diff --git a/README.md b/README.md new file mode 100644 index 000000000..5884f4b41 --- /dev/null +++ b/README.md @@ -0,0 +1,123 @@ +# Pyston [![Build Status](https://travis-ci.org/dropbox/pyston.svg?branch=master)](https://travis-ci.org/dropbox/pyston/builds) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/dropbox/pyston?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +Pyston is a performance-oriented Python implementation built using LLVM and modern JIT techniques. For a high-level description of how it works, please check out our [technical overview](https://github.com/dropbox/pyston/wiki/Technical-overview) or our [FAQ](https://github.com/dropbox/pyston/wiki/FAQ) for some more details. + +We have a small website [pyston.org](http://pyston.org/), which for now just hosts the blog. We have two mailing lists: [pyston-dev@](http://lists.pyston.org/cgi-bin/mailman/listinfo/pyston-dev) for development-related discussions, and [pyston-announce@](http://lists.pyston.org/cgi-bin/mailman/listinfo/pyston-announce) which is for wider announcements (new releases, major project changes). We also have a [gitter chat room](https://gitter.im/dropbox/pyston) where most discussion takes place. 
+ +### Current state + +Pyston should be considered in alpha: it "works" in that it can successfully run Python code, but it is still quite far from being useful for end-users. + +Currently, Pyston targets Python 2.7, only runs on x86_64 platforms, and only has been tested on Ubuntu. Support for more platforms -- along with Python 3 compatibility -- is desired but deferred until we feel successful on our initial platform. Pyston does not currently work on Mac OSX, and it is not clear when it will. + +##### Contributing + +Pyston welcomes any kind of contribution; please see [CONTRIBUTING.md](https://github.com/dropbox/pyston/blob/master/CONTRIBUTING.md) for details. +> tl;dr: You will need to sign the [Dropbox CLA](https://opensource.dropbox.com/cla/) and run the tests. + +We have some documentation for those interested in contributing: see our [Development Guide](https://github.com/dropbox/pyston/wiki/Development-Guide) and [development tips](docs/TIPS.md). + +### Roadmap + +##### v0.5: Coming soon +- Focus is on being ready to run Dropbox's production services. 
Initial plan: + - Support for a final few esoteric Python features + - Better stability and performance (but particularly for the Dropbox servers) + +##### v0.4: [released 11/3/2015](http://blog.pyston.org/2015/11/03/102/) +- Many new features and better language support + - Passes most of the tests of several famous python libraries + - Unicode support + - GC finalizer + - much improved C API support +- Better performance + - Custom C++ exception handler + - Object Cache (improves startup time) + - Baseline JIT + +##### v0.3: [released 2/24/2015](http://blog.pyston.org/2015/02/24/pyston-0-3-self-hosting-sufficiency/) +- Better language support + - Can self-host all of our internal Python scripts +- Better performance + - Match CPython's performance on our small benchmark suite + +##### v0.2: [released 9/11/2014](http://blog.pyston.org/2014/09/11/9/) +- Focus was on improving language compatibility to the point that we can start running "real code" in the form of existing benchmarks. +- Many new features: + - Exceptions + - Class inheritance, metaclasses + - Basic native C API support + - Closures, generators, lambdas, generator expressions + - Default arguments, keywords, \*args, \*\*kwargs + - Longs, and integer promotion + - Multithreading support +- We have allowed performance to regress, sometimes considerably, but (hopefully) in places that allow for more efficient implementations as we have time. + +##### v0.1: [released 4/2/2014](https://tech.dropbox.com/2014/04/introducing-pyston-an-upcoming-jit-based-python-implementation/) +- Focus was on building and validating the core Python-to-LLVM JIT infrastructure. +- Many core parts of the language were missing. + +### Trying it out + +We have some build instructions at [INSTALLING.md](https://github.com/dropbox/pyston/blob/master/docs/INSTALLING.md). If you have any issues, please feel free to file an issue in the issue tracker, or mention it via email or gitter. 
+ +Once you've followed those instructions, you should be able to do +``` +$ make check +``` + +And see that hopefully all of the tests pass. (If they don't, please let us know.) + +All pull requests are built and tested by travis-ci.org running Ubuntu 12.04. +See [travis-ci.org/dropbox/pyston/builds](https://travis-ci.org/dropbox/pyston/builds). + +### Running Pyston + +Pyston builds in a few different configurations; right now there is `pyston_dbg`, which is the debug configuration and contains assertions and debug symbols, and `pyston_release`, the release configuration which has no assertions or debug symbols, and has full optimizations. You can build them by saying `make pyston_dbg` or `make pyston_release`, respectively. If you are interested in seeing how fast Pyston can go, you should try the release configuration, but there is a good chance that it will crash, in which case you can run the debug configuration to see what is happening. + +> There are a number of other configurations useful for development: "pyston_debug" contains full LLVM debug information, but will weigh in at a few hundred MB. "pyston_prof" contains gprof-style profiling instrumentation; gprof can't profile JIT'd code, reducing it's usefulness in this case, but the configuration has stuck around since it gets compiled with gcc, and can expose issues with the normal clang-based build. + +You can get a simple REPL by simply typing `make run`; it is not very robust right now, and only supports single-line statements, but can give you an interactive view into how Pyston works. To get more functionality, you can do `./pyston_dbg -i [your_source_file.py]`, which will go into the REPL after executing the given file, letting you access all the variables you had defined. 
+ +#### Makefile targets + +- `make pyston_release`: to compile in release mode and generate the `pyston_release` executable +- `make check`: run the tests +- `make run`: run the REPL +- `make format`: run clang-format over the codebase + +For more, see [development tips](docs/TIPS.md) + +#### Pyston command-line options: + +Pyston-specific flags: +
+
-q
+
Set verbosity to 0
+
-v
+
Increase verbosity by 1
+ +
-s
+
Print out the internal stats at exit.
+ +
-n
+
Disable the Pyston interpreter. This is mostly used for debugging, to force the use of higher compilation tiers in situations where they wouldn't typically be used.
+ +
-O
+
Force Pyston to always run at the highest compilation tier. This doesn't always produce the fastest running time due to the lack of type recording from lower compilation tiers, but, similar to -n, can help test the code generator.
+ +
-I
+
Force always using the Pyston interpreter. This is mostly used for debugging / testing. (Takes precedence over -n and -O)
+ +
-r
+
Use a stripped stdlib. When running pyston_dbg, the default is to use a stdlib with full debugging symbols enabled. Passing -r changes this behavior to load a slimmer, stripped stdlib.
+ +
-x
+
Disable the pypa parser.
+ +Standard Python flags: +
-i
+
Go into the repl after executing the given script.
+
+ +There are also some lesser-used flags; see src/jit.cpp for more details. diff --git a/build_deps/jemalloc b/build_deps/jemalloc new file mode 160000 index 000000000..46c0af68b --- /dev/null +++ b/build_deps/jemalloc @@ -0,0 +1 @@ +Subproject commit 46c0af68bd248b04df75e4f92d5fb804c3d75340 diff --git a/build_deps/libpypa b/build_deps/libpypa new file mode 160000 index 000000000..40eb32aa8 --- /dev/null +++ b/build_deps/libpypa @@ -0,0 +1 @@ +Subproject commit 40eb32aa846b21e122cfa1f10bf8972e67de14c8 diff --git a/build_deps/libunwind b/build_deps/libunwind new file mode 160000 index 000000000..65ac86741 --- /dev/null +++ b/build_deps/libunwind @@ -0,0 +1 @@ +Subproject commit 65ac86741606e1d87aef75755c699e4fa6884230 diff --git a/build_deps/lz4 b/build_deps/lz4 new file mode 160000 index 000000000..160661c7a --- /dev/null +++ b/build_deps/lz4 @@ -0,0 +1 @@ +Subproject commit 160661c7a4cbf805f4af74d2e3932a17a66e6ce7 diff --git a/clang_patches/0001-Rename-one-of-scan-builds-internal-environment-varia.patch b/clang_patches/0001-Rename-one-of-scan-builds-internal-environment-varia.patch new file mode 100644 index 000000000..816da139f --- /dev/null +++ b/clang_patches/0001-Rename-one-of-scan-builds-internal-environment-varia.patch @@ -0,0 +1,46 @@ +From b1b7a1de4895a5d821592f0221c3ae0b14200ff1 Mon Sep 17 00:00:00 2001 +From: Kevin Modzelewski +Date: Tue, 27 May 2014 17:03:25 -0700 +Subject: [PATCH] Rename one of scan-builds internal environment variables to not conflict + +--- + tools/scan-build/ccc-analyzer | 2 +- + tools/scan-build/scan-build | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/tools/scan-build/ccc-analyzer b/tools/scan-build/ccc-analyzer +index b5445e6..65f8455 100755 +--- a/tools/scan-build/ccc-analyzer ++++ b/tools/scan-build/ccc-analyzer +@@ -43,5 +43,5 @@ if ($FindBin::Script =~ /c\+\+-analyzer/) { + +- $Clang = $ENV{'CLANG_CXX'}; ++ $Clang = $ENV{'CCC_CLANG_CXX'}; + if (!defined $Clang || ! 
-x $Clang) { $Clang = 'clang++'; } + + $IsCXX = 1 +diff --git a/tools/scan-build/scan-build b/tools/scan-build/scan-build +index 31dbfb4..9a3c9e8 100755 +--- a/tools/scan-build/scan-build ++++ b/tools/scan-build/scan-build +@@ -884,7 +884,7 @@ sub AddIfNotPresent { + + sub SetEnv { + my $Options = shift @_; +- foreach my $opt ('CC', 'CXX', 'CLANG', 'CLANG_CXX', ++ foreach my $opt ('CC', 'CXX', 'CLANG', 'CCC_CLANG_CXX', + 'CCC_ANALYZER_ANALYSIS', 'CCC_ANALYZER_PLUGINS', + 'CCC_ANALYZER_CONFIG') { + die "$opt is undefined\n" if (!defined $opt); +@@ -1653,7 +1653,7 @@ my %Options = ( + 'CC' => $Cmd, + 'CXX' => $CmdCXX, + 'CLANG' => $Clang, +- 'CLANG_CXX' => $ClangCXX, ++ 'CCC_CLANG_CXX' => $ClangCXX, + 'VERBOSE' => $Verbose, + 'CCC_ANALYZER_ANALYSIS' => $CCC_ANALYZER_ANALYSIS, + 'CCC_ANALYZER_PLUGINS' => $CCC_ANALYZER_PLUGINS, +-- +1.7.4.1 + diff --git a/clang_patches/LICENSE b/clang_patches/LICENSE new file mode 100644 index 000000000..dd38ca47a --- /dev/null +++ b/clang_patches/LICENSE @@ -0,0 +1,43 @@ +============================================================================== +LLVM Release License +============================================================================== +University of Illinois/NCSA +Open Source License + +Copyright (c) 2003-2014 University of Illinois at Urbana-Champaign. +All rights reserved. 
+ +Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal with +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at + Urbana-Champaign, nor the names of its contributors may be used to + endorse or promote products derived from this Software without specific + prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE +SOFTWARE. 
diff --git a/cmake/CheckTypeSizeof.c.in b/cmake/CheckTypeSizeof.c.in new file mode 100644 index 000000000..157cad461 --- /dev/null +++ b/cmake/CheckTypeSizeof.c.in @@ -0,0 +1,42 @@ +/* Pyston: This file is included from cmake 3 for compatability + * downloaded 2015-04-03 + * https://github.com/Kitware/CMake/blob/master/Modules/CheckTypeSize.c.in + */ + +@headers@ + +#undef KEY +#if defined(__i386) +# define KEY '_','_','i','3','8','6' +#elif defined(__x86_64) +# define KEY '_','_','x','8','6','_','6','4' +#elif defined(__ppc__) +# define KEY '_','_','p','p','c','_','_' +#elif defined(__ppc64__) +# define KEY '_','_','p','p','c','6','4','_','_' +#endif + +#define SIZE (sizeof(@type@)) +char info_size[] = {'I', 'N', 'F', 'O', ':', 's','i','z','e','[', + ('0' + ((SIZE / 10000)%10)), + ('0' + ((SIZE / 1000)%10)), + ('0' + ((SIZE / 100)%10)), + ('0' + ((SIZE / 10)%10)), + ('0' + (SIZE % 10)), + ']', +#ifdef KEY + ' ','k','e','y','[', KEY, ']', +#endif + '\0'}; + +#ifdef __CLASSIC_C__ +int main(argc, argv) int argc; char *argv[]; +#else +int main(int argc, char *argv[]) +#endif +{ + int require = 0; + require += info_size[argc]; + (void)argv; + return require; +} diff --git a/cmake/CheckTypeSizeof.cmake b/cmake/CheckTypeSizeof.cmake new file mode 100644 index 000000000..f5217684c --- /dev/null +++ b/cmake/CheckTypeSizeof.cmake @@ -0,0 +1,278 @@ +#.rst: +# CheckTypeSizeof +# ------------- +# +# Pyston: This file is included from cmake 3 for compatability +# downloaded 2015-04-03 +# https://github.com/Kitware/CMake/blob/master/Modules/CheckTypeSize.cmake +# +# +# Check sizeof a type +# +# :: +# +# CHECK_TYPE_SIZEOF(TYPE VARIABLE [BUILTIN_TYPES_ONLY] +# [LANGUAGE ]) +# +# Check if the type exists and determine its size. 
On return, +# "HAVE_${VARIABLE}" holds the existence of the type, and "${VARIABLE}" +# holds one of the following: +# +# :: +# +# = type has non-zero size +# "0" = type has arch-dependent size (see below) +# "" = type does not exist +# +# Both ``HAVE_${VARIABLE}`` and ``${VARIABLE}`` will be created as internal +# cache variables. +# +# Furthermore, the variable "${VARIABLE}_CODE" holds C preprocessor code +# to define the macro "${VARIABLE}" to the size of the type, or leave +# the macro undefined if the type does not exist. +# +# The variable "${VARIABLE}" may be "0" when CMAKE_OSX_ARCHITECTURES has +# multiple architectures for building OS X universal binaries. This +# indicates that the type size varies across architectures. In this +# case "${VARIABLE}_CODE" contains C preprocessor tests mapping from +# each architecture macro to the corresponding type size. The list of +# architecture macros is stored in "${VARIABLE}_KEYS", and the value for +# each key is stored in "${VARIABLE}-${KEY}". +# +# If the BUILTIN_TYPES_ONLY option is not given, the macro checks for +# headers , , and , and saves results +# in HAVE_SYS_TYPES_H, HAVE_STDINT_H, and HAVE_STDDEF_H. The type size +# check automatically includes the available headers, thus supporting +# checks of types defined in the headers. +# +# If LANGUAGE is set, the specified compiler will be used to perform the +# check. Acceptable values are C and CXX +# +# Despite the name of the macro you may use it to check the size of more +# complex expressions, too. To check e.g. 
for the size of a struct +# member you can do something like this: +# +# :: +# +# check_type_sizeof("((struct something*)0)->member" SIZEOF_MEMBER) +# +# +# +# The following variables may be set before calling this macro to modify +# the way the check is run: +# +# :: +# +# CMAKE_REQUIRED_FLAGS = string of compile command line flags +# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar) +# CMAKE_REQUIRED_INCLUDES = list of include directories +# CMAKE_REQUIRED_LIBRARIES = list of libraries to link +# CMAKE_REQUIRED_QUIET = execute quietly without messages +# CMAKE_EXTRA_INCLUDE_FILES = list of extra headers to include + +#============================================================================= +# Copyright 2002-2009 Kitware, Inc. +# +# Distributed under the OSI-approved BSD License (the "License"); +# see accompanying file Copyright.txt for details. +# +# This software is distributed WITHOUT ANY WARRANTY; without even the +# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +# See the License for more information. +#============================================================================= +# (To distribute this file outside of CMake, substitute the full +# License text for the above reference.) + +include(CheckIncludeFile) +include(CheckIncludeFileCXX) + +cmake_policy(PUSH) +cmake_policy(VERSION 2.8) + +get_filename_component(__check_type_sizeof_dir "${CMAKE_CURRENT_LIST_FILE}" PATH) + +#----------------------------------------------------------------------------- +# Helper function. DO NOT CALL DIRECTLY. +function(__check_type_sizeof_impl type var map builtin language) + if(NOT CMAKE_REQUIRED_QUIET) + message(STATUS "Check size of ${type}") + endif() + + # Include header files. 
+ set(headers) + if(builtin) + if(HAVE_SYS_TYPES_H) + set(headers "${headers}#include \n") + endif() + if(HAVE_STDINT_H) + set(headers "${headers}#include \n") + endif() + if(HAVE_STDDEF_H) + set(headers "${headers}#include \n") + endif() + endif() + foreach(h ${CMAKE_EXTRA_INCLUDE_FILES}) + set(headers "${headers}#include \"${h}\"\n") + endforeach() + + # Perform the check. + + if("${language}" STREQUAL "C") + set(src ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CheckTypeSizeof/${var}.c) + elseif("${language}" STREQUAL "CXX") + set(src ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CheckTypeSizeof/${var}.cpp) + else() + message(FATAL_ERROR "Unknown language:\n ${language}\nSupported languages: C, CXX.\n") + endif() + set(bin ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CheckTypeSizeof/${var}.bin) + configure_file(${__check_type_sizeof_dir}/CheckTypeSizeof.c.in ${src} @ONLY) + try_compile(HAVE_${var} ${CMAKE_BINARY_DIR} ${src} + COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} + LINK_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} + CMAKE_FLAGS + "-DCOMPILE_DEFINITIONS:STRING=${CMAKE_REQUIRED_FLAGS}" + "-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}" + OUTPUT_VARIABLE output + COPY_FILE ${bin} + ) + + if(HAVE_${var}) + # The check compiled. Load information from the binary. + file(STRINGS ${bin} strings LIMIT_COUNT 10 REGEX "INFO:size") + + # Parse the information strings. + set(regex_size ".*INFO:size\\[0*([^]]*)\\].*") + set(regex_key " key\\[([^]]*)\\]") + set(keys) + set(code) + set(mismatch) + set(first 1) + foreach(info ${strings}) + if("${info}" MATCHES "${regex_size}") + # Get the type size. + set(size "${CMAKE_MATCH_1}") + if(first) + set(${var} ${size}) + elseif(NOT "${size}" STREQUAL "${${var}}") + set(mismatch 1) + endif() + set(first 0) + + # Get the architecture map key. 
+ string(REGEX MATCH "${regex_key}" key "${info}") + string(REGEX REPLACE "${regex_key}" "\\1" key "${key}") + if(key) + set(code "${code}\nset(${var}-${key} \"${size}\")") + list(APPEND keys ${key}) + endif() + endif() + endforeach() + + # Update the architecture-to-size map. + if(mismatch AND keys) + configure_file(${__check_type_sizeof_dir}/CheckTypeSizeMap.cmake.in ${map} @ONLY) + set(${var} 0) + else() + file(REMOVE ${map}) + endif() + + if(mismatch AND NOT keys) + message(SEND_ERROR "CHECK_TYPE_SIZEOF found different results, consider setting CMAKE_OSX_ARCHITECTURES or CMAKE_TRY_COMPILE_OSX_ARCHITECTURES to one or no architecture !") + endif() + + if(NOT CMAKE_REQUIRED_QUIET) + message(STATUS "Check size of ${type} - done") + endif() + file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log + "Determining size of ${type} passed with the following output:\n${output}\n\n") + set(${var} "${${var}}" CACHE INTERNAL "CHECK_TYPE_SIZEOF: sizeof(${type})") + else() + # The check failed to compile. 
+ if(NOT CMAKE_REQUIRED_QUIET) + message(STATUS "Check size of ${type} - failed") + endif() + file(READ ${src} content) + file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log + "Determining size of ${type} failed with the following output:\n${output}\n${src}:\n${content}\n\n") + set(${var} "" CACHE INTERNAL "CHECK_TYPE_SIZEOF: ${type} unknown") + file(REMOVE ${map}) + endif() +endfunction() + +#----------------------------------------------------------------------------- +macro(CHECK_TYPE_SIZEOF TYPE VARIABLE) + # parse arguments + unset(doing) + foreach(arg ${ARGN}) + if("x${arg}" STREQUAL "xBUILTIN_TYPES_ONLY") + set(_CHECK_TYPE_SIZEOF_${arg} 1) + unset(doing) + elseif("x${arg}" STREQUAL "xLANGUAGE") # change to MATCHES for more keys + set(doing "${arg}") + set(_CHECK_TYPE_SIZEOF_${doing} "") + elseif("x${doing}" STREQUAL "xLANGUAGE") + set(_CHECK_TYPE_SIZEOF_${doing} "${arg}") + unset(doing) + else() + message(FATAL_ERROR "Unknown argument:\n ${arg}\n") + endif() + endforeach() + if("x${doing}" MATCHES "^x(LANGUAGE)$") + message(FATAL_ERROR "Missing argument:\n ${doing} arguments requires a value\n") + endif() + if(DEFINED _CHECK_TYPE_SIZEOF_LANGUAGE) + if(NOT "x${_CHECK_TYPE_SIZEOF_LANGUAGE}" MATCHES "^x(C|CXX)$") + message(FATAL_ERROR "Unknown language:\n ${_CHECK_TYPE_SIZEOF_LANGUAGE}.\nSupported languages: C, CXX.\n") + endif() + set(_language ${_CHECK_TYPE_SIZEOF_LANGUAGE}) + else() + set(_language C) + endif() + + # Optionally check for standard headers. 
+ if(_CHECK_TYPE_SIZEOF_BUILTIN_TYPES_ONLY) + set(_builtin 0) + else() + set(_builtin 1) + if("${_language}" STREQUAL "C") + check_include_file(sys/types.h HAVE_SYS_TYPES_H) + check_include_file(stdint.h HAVE_STDINT_H) + check_include_file(stddef.h HAVE_STDDEF_H) + elseif("${_language}" STREQUAL "CXX") + check_include_file_cxx(sys/types.h HAVE_SYS_TYPES_H) + check_include_file_cxx(stdint.h HAVE_STDINT_H) + check_include_file_cxx(stddef.h HAVE_STDDEF_H) + endif() + endif() + unset(_CHECK_TYPE_SIZEOF_BUILTIN_TYPES_ONLY) + unset(_CHECK_TYPE_SIZEOF_LANGUAGE) + + # Compute or load the size or size map. + set(${VARIABLE}_KEYS) + set(_map_file ${CMAKE_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/CheckTypeSizeof/${VARIABLE}.cmake) + if(NOT DEFINED HAVE_${VARIABLE}) + __check_type_sizeof_impl(${TYPE} ${VARIABLE} ${_map_file} ${_builtin} ${_language}) + endif() + include(${_map_file} OPTIONAL) + set(_map_file) + set(_builtin) + + # Create preprocessor code. + if(${VARIABLE}_KEYS) + set(${VARIABLE}_CODE) + set(_if if) + foreach(key ${${VARIABLE}_KEYS}) + set(${VARIABLE}_CODE "${${VARIABLE}_CODE}#${_if} defined(${key})\n# define ${VARIABLE} ${${VARIABLE}-${key}}\n") + set(_if elif) + endforeach() + set(${VARIABLE}_CODE "${${VARIABLE}_CODE}#else\n# error ${VARIABLE} unknown\n#endif") + set(_if) + elseif(${VARIABLE}) + set(${VARIABLE}_CODE "#define ${VARIABLE} ${${VARIABLE}}") + else() + set(${VARIABLE}_CODE "/* #undef ${VARIABLE} */") + endif() +endmacro() + +#----------------------------------------------------------------------------- +cmake_policy(POP) diff --git a/cmake/FindValgrind.cmake b/cmake/FindValgrind.cmake new file mode 100644 index 000000000..d24b22f0e --- /dev/null +++ b/cmake/FindValgrind.cmake @@ -0,0 +1,10 @@ +# find valgrind header and binary +include(FindPackageHandleStandardArgs) + +find_path(VALGRIND_INCLUDE_DIR + NAMES valgrind.h + PATHS /usr/include /usr/include/valgrind ${VALGRIND_DIR}/include/valgrind) + +find_program(VALGRIND_BIN NAMES valgrind PATHS /usr/bin 
${VALGRIND_DIR}/bin) + +find_package_handle_standard_args(valgrind REQUIRED_VARS VALGRIND_BIN VALGRIND_INCLUDE_DIR) diff --git a/cmake/pyconfig.cmake b/cmake/pyconfig.cmake new file mode 100644 index 000000000..f94742fe6 --- /dev/null +++ b/cmake/pyconfig.cmake @@ -0,0 +1,71 @@ +# generate pyconfig.h + +include(CheckIncludeFiles) +include(CheckTypeSizeof) +include(CheckSymbolExists) + +set(CMAKE_EXTRA_INCLUDE_FILES unordered_map) +set(CMAKE_REQUIRED_FLAGS -std=c++11) +check_type_sizeof("std::unordered_map" SIZEOF_UNORDEREDMAP LANGUAGE CXX) +set(CMAKE_EXTRA_INCLUDE_FILES) +set(CMAKE_REQUIRED_FLAGS) + +check_include_files(alloca.h HAVE_ALLOCA_H) +check_include_files(asm/types.h HAVE_ASM_TYPES_H) +check_include_files(curses.h HAVE_CURSES_H) +check_include_files(dirent.h HAVE_DIRENT_H) +check_include_files(dlfcn.h HAVE_DLFCN_H) +check_include_files(errno.h HAVE_ERRNO_H) +check_include_files(fcntl.h HAVE_FCNTL_H) +check_include_files(grp.h HAVE_GRP_H) +check_include_files(inttypes.h HAVE_INTTYPES_H) +check_include_files(langinfo.h HAVE_LANGINFO_H) +check_include_files(libintl.h HAVE_LIBINTL_H) +check_include_files(linux/netlink.h HAVE_LINUX_NETLINK_H) +check_include_files(linux/tipc.h HAVE_LINUX_TIPC_H) +check_include_files(memory.h HAVE_MEMORY_H) +check_include_files(ncurses.h HAVE_NCURSES_H) +check_include_files(netpacket/packet.h HAVE_NETPACKET_PACKET_H) +check_include_files(poll.h HAVE_POLL_H) +check_include_files(pthread.h HAVE_PTHREAD_H) +check_include_files(pty.h HAVE_PTY_H) +check_include_files(shadow.h HAVE_SHADOW_H) +check_include_files(signal.h HAVE_SIGNAL_H) +check_include_files(spawn.h HAVE_SPAWN_H) +check_include_files(stdint.h HAVE_STDINT_H) +check_include_files(stdlib.h HAVE_STDLIB_H) +check_include_files(string.h HAVE_STRING_H) +check_include_files(strings.h HAVE_STRINGS_H) +check_include_files(stropts.h HAVE_STROPTS_H) +check_include_files(sys/epoll.h HAVE_SYS_EPOLL_H) +check_include_files(sys/file.h HAVE_SYS_FILE_H) +check_include_files(sys/param.h 
HAVE_SYS_PARAM_H) +check_include_files(sys/poll.h HAVE_SYS_POLL_H) +check_include_files(sys/resource.h HAVE_SYS_RESOURCE_H) +check_include_files(sys/select.h HAVE_SYS_SELECT_H) +check_include_files(sys/socket.h HAVE_SYS_SOCKET_H) +check_include_files(sys/stat.h HAVE_SYS_STAT_H) +check_include_files(sys/statvfs.h HAVE_SYS_STATVFS_H) +check_include_files(sys/time.h HAVE_SYS_TIME_H) +check_include_files(sys/times.h HAVE_SYS_TIMES_H) +check_include_files(sys/types.h HAVE_SYS_TYPES_H) +check_include_files(sys/un.h HAVE_SYS_UN_H) +check_include_files(sys/utsname.h HAVE_SYS_UTSNAME_H) +check_include_files(sys/wait.h HAVE_SYS_WAIT_H) +check_include_files(sysexits.h HAVE_SYSEXITS_H) +check_include_files(term.h HAVE_TERM_H) +check_include_files(termios.h HAVE_TERMIOS_H) +check_include_files(unistd.h HAVE_UNISTD_H) +check_include_files(utime.h HAVE_UTIME_H) +check_include_files(wchar.h HAVE_WCHAR_H) + +set(CMAKE_REQUIRED_LIBRARIES util) + +check_symbol_exists(openpty "pty.h" HAVE_OPENPTY) + +configure_file(from_cpython/Include/pyconfig.h.in from_cpython/Include/pyconfig.h) + +# CMake sucks: it has no idea that pyconfig.h is something that can be installed. +# Just tell it to install whatever file is at that particular location, and rely on +# the rest of the build rules to ensure that it's made in time. +install(FILES ${CMAKE_BINARY_DIR}/from_cpython/Include/pyconfig.h DESTINATION from_cpython/Include) diff --git a/docs/Doxyfile.in b/docs/Doxyfile.in new file mode 100644 index 000000000..501ce4fe0 --- /dev/null +++ b/docs/Doxyfile.in @@ -0,0 +1,2354 @@ +# Doxyfile 1.8.6 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] 
+# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = Pyston + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = 0.2 + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "An open-source Python implementation using JIT techniques." + +# With the PROJECT_LOGO tag one can specify an logo or icon that is included in +# the documentation. The maximum height of the logo should not exceed 55 pixels +# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo +# to the output directory. 
+ +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = @CMAKE_BINARY_DIR@/docs + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. 
+ +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. 
+ +FULL_PATH_NAMES = YES + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) 
+# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. +# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a +# new page for each member. If set to NO, the documentation of a member will be +# part of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:\n" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". You can put \n's in the value part of an alias to insert +# newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding "class=itcl::class" +# will allow you to use the command class in the itcl::class meaning. 
+ +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, Javascript, +# C#, C, C++, D, PHP, Objective-C, Python, Fortran, VHDL. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. +# +# Note For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. 
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibility issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word
+# or globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = YES
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+ +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. 
If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. + +INLINE_SIMPLE_STRUCTS = YES + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. 
Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = YES + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = YES + +# If the EXTRACT_STATIC tag is set to YES all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. 
+ +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO these classes will be included in the various overviews. This option has +# no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# (class|struct|union) declarations. If set to NO these declarations will be +# included in the documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. If set to NO these +# blocks will be appended to the function's detailed documentation block. +# The default value is: NO. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation that is typed after a +# \internal command is included. If the tag is set to NO then the documentation +# will be excluded. Set it to YES to include the internal documentation. +# The default value is: NO. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file +# names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. +# The default value is: system dependent. 
+ +CASE_SENSE_NAMES = NO + +# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with +# their full class and namespace scopes in the documentation. If set to YES the +# scope will be hidden. +# The default value is: NO. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = YES + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. 
If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the +# todo list. This list is created by putting \todo commands in the +# documentation. +# The default value is: YES. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the +# test list. 
This list is created by putting \test commands in the +# documentation. +# The default value is: YES. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES the list +# will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. 
This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. Do not use file names with spaces, bibtex cannot handle them. See +# also \cite for info how to create references. 
+ +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some parameters +# in a documented function, or documenting parameters that don't exist or using +# markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO doxygen will only warn about wrong or incomplete parameter +# documentation, but not about the absence of documentation. +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. 
Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. + +INPUT = @CMAKE_SOURCE_DIR@/src + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: http://www.gnu.org/software/libiconv) for the list of +# possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. 
+ +FILE_PATTERNS = *.c \ + *.cc \ + *.cxx \ + *.cpp \ + *.c++ \ + *.java \ + *.ii \ + *.ixx \ + *.ipp \ + *.i++ \ + *.inl \ + *.idl \ + *.ddl \ + *.odl \ + *.h \ + *.hh \ + *.hxx \ + *.hpp \ + *.h++ \ + *.cs \ + *.d \ + *.php \ + *.php4 \ + *.php5 \ + *.phtml \ + *.inc \ + *.m \ + *.markdown \ + *.md \ + *.mm \ + *.dox \ + *.py \ + *.f90 \ + *.f \ + *.for \ + *.tcl \ + *.vhd \ + *.vhdl \ + *.ucf \ + *.qsf \ + *.as \ + *.js + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. 
Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+#   <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER ) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. 
+ +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = YES + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# function all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES, then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. + +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. 
The htags tool is part of GNU's global source tagging system +# (see http://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in +# which the alphabetical index list will be split. +# Minimum value: 1, maximum value: 20, default value: 5. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. 
The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. +# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. 
For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user-
+# defined cascading style sheet that is included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet file to the output directory. For an example
+# see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the stylesheet and background images according to +# this color. Hue is specified as an angle on a colorwheel, see +# http://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use grayscales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to NO can help when comparing the output of multiple runs. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: http://developer.apple.com/tools/xcode/), introduced with +# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a +# Makefile in the HTML output directory. 
Running make will produce the docset in +# that directory and running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. 
The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on +# Windows. +# +# The HTML Help Workshop contains a compiler that can convert all HTML output +# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML +# files are now used as the Windows 98 help format, and will replace the old +# Windows help format (.hlp) on all Windows platforms in the future. Compressed +# HTML files also contain an index, a table of contents, and you can search for +# words in the documentation. The HTML workshop also contains a viewer for +# compressed HTML files. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_HTMLHELP = NO + +# The CHM_FILE tag can be used to specify the file name of the resulting .chm +# file. You can add a path in front of the file if the result should not be +# written to the html output directory. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_FILE = + +# The HHC_LOCATION tag can be used to specify the location (absolute path +# including file name) of the HTML help compiler ( hhc.exe). If non-empty +# doxygen will try to run the HTML help compiler on the generated index.hhp. +# The file has to be specified with full path. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +HHC_LOCATION = + +# The GENERATE_CHI flag controls if a separate .chi index file is generated ( +# YES) or that it should be included in the master .chm file ( NO). +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +GENERATE_CHI = NO + +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) +# and project file content. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
+ +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated ( +# YES) or a normal table of contents ( NO) in the .chm file. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- +# folders). +# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. 
+ +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- +# filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location of Qt's +# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the +# generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can +# further fine-tune the look of the index. As an example, the default style +# sheet generated by doxygen has an example that shows how to put an image at +# the root of the tree instead of the PROJECT_NAME. Since the tree basically has +# the same information as the tab index, you could consider setting +# DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = YES + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using prerendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. 
For example +# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_EXTENSIONS = + +# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces +# of code that will be used on startup of the MathJax code. See the MathJax site +# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# example see the documentation. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_CODEFILE = + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box for +# the HTML output. The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /