diff --git a/.gitignore b/.gitignore index c08bd91..f3e149f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -# -- Mac OS files (from github's Global/OSX.gitignore) -- +# -- Mac OS files (from github's Global/OSX.gitignore) -- .DS_Store .AppleDouble .LSOverride @@ -58,85 +58,13 @@ xcuserdata DerivedData *.xcuserstate -# -- Automake files (http://www.gnu.org/software/automake) - -Makefile -Makefile.in -.libs -.dirstamp -test-suite.log -/ar-lib -/mdate-sh -/py-compile -/test-driver -/ylwrap - -# --- Autoconf files (http://www.gnu.org/software/autoconf) - -aminclude_static.am -config.h -config.h.in -config.h.in~ -config.log -config.status -.deps -autom4te.cache -/autoscan.log -/autoscan-*.log -/aclocal.m4 -/compile -/config.guess -/config.h.in -/config.sub -/configure -/configure.scan -/depcomp -/install-sh -/missing -/stamp-h1 - -# --- Files added by autoreconf -i - -/ltmain.sh -libtool -COPYING -INSTALL - -# --- Texinfo files (http://www.gnu.org/software/texinfo) - -/texinfo.tex - -# --- M4 files (http://www.gnu.org/software/m4) - -m4/libtool.m4 -m4/ltoptions.m4 -m4/ltsugar.m4 -m4/ltversion.m4 -m4/lt~obsolete.m4 - # -- project specific ignores -- +lib* +build config.sh -exe -exe.prof* -vgcore* -*.o -*.dSYM -*.swp -*.swo -*.gcno -*.lo -*.la -*.gcda -*.log -*.trs +vgcore* -test/*.txt -test/ut*/*.txt - -benchmark/*/benchmark_*.out - -examples/*/*.txt -examples/*/*/*.txt +benchmark/*/*.out doc/*.aux doc/*.dvi @@ -149,8 +77,6 @@ doc/*.fdb_latexmk doc/*.fls doc/*.ilg doc/*.ind -doc/*.lb doc/auto + doc/doxygen/*/* -!doc/doxygen/doxygen.conf -!doc/doxygen/doxygen.html diff --git a/.travis.yml b/.travis.yml index 964421c..a503f0e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,73 +1,93 @@ -sudo: required -dist: trusty +dist: xenial language: c++ services: - docker +addons: + apt: + packages: + - valgrind + - libgsl-dev + + homebrew: + packages: + - valgrind + - gsl + matrix: include: - #- os: linux - # env: - # - MATRIX_EVAL="wget 
ftp://ftp.gnu.org/gnu/gsl/gsl-2.3.tar.gz && tar -xvf gsl-2.3.tar.gz && cd gsl-2.3 && ./configure && make -j2 && sudo make install && cd ../ && export LDFLAGS=-L/usr/local/lib && export CPPFLAGS=-I/usr/local/include" - # - USE_DOCKER="FALSE" - # - USE_GCOV="FALSE" - - os: linux // arch linux docker env: + - MYCXX="g++" - USE_DOCKER="TRUE" - - USE_OPENMP="FALSE" - USE_GCOV="FALSE" - - - os: linux // with enabled openmp - env: - - USE_DOCKER="TRUE" - USE_OPENMP="TRUE" - - USE_GCOV="FALSE" - os: osx - osx_image: xcode8 + osx_image: xcode10.1 env: - - MATRIX_EVAL="brew update && brew install gsl valgrind" - MYCXX="g++" - USE_DOCKER="FALSE" - - USE_GCOV="FALSE" + - USE_GCOV="TRUE" + - USE_OPENMP="FALSE" - os: osx - osx_image: xcode8 + osx_image: xcode10.1 env: - - MATRIX_EVAL="brew update && brew install gsl valgrind" - MYCXX="clang++" - USE_DOCKER="FALSE" - - USE_GCOV="TRUE" + - USE_GCOV="FALSE" + - USE_OPENMP="FALSE" before_install: - | if [[ "$USE_DOCKER" == "TRUE" ]]; then docker pull nnvmc/base; - local configopt="--enable-debug" - if [[ "$USE_OPENMP" == "TRUE" ]]; then configopt="${configopt} --enable-openmp"; fi; - if [[ "$USE_GCOV" == "TRUE" ]]; then configopt="${configopt} --enable-coverage"; fi; - docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo && ./autogen.sh && ./configure ${configopt}"; else - eval "${MATRIX_EVAL}" && ${MYCXX} -v; - ./autogen.sh - local configopt="--enable-debug" - if [[ "$USE_GCOV" == "TRUE" ]]; then pip install cpp-coveralls; configopt="${configopt} --enable-coverage"; fi; - ./configure CXX=${MYCXX} ${configopt}; + ${MYCXX} -v; fi; + echo "CXX_COMPILER=${MYCXX}" >> config.sh; + echo "CXX_FLAGS=\"-O0 -g -Wall -Wno-unused-function ${configopt}\"" >> config.sh; + if [[ "$USE_GCOV" == "TRUE" ]]; then + echo "USE_COVERAGE=1" >> config.sh; + else + echo "USE_COVERAGE=0" >> config.sh; + fi; + if [[ "$USE_OPENMP" == "TRUE" ]]; then + echo "USE_OPENMP=1" >> config.sh; + else + echo "USE_OPENMP=0" >> config.sh; + fi; + 
echo "GSL_ROOT=" >> config.sh; script: - | if [[ "$USE_DOCKER" == "TRUE" ]]; then - docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo && make check" || exit 1; - if [[ "$USE_GCOV" == "TRUE" ]]; then - docker run -e TRAVIS=$TRAVIS -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "pip install cpp-coveralls && cd /root/repo && coveralls -b lib -e test -e benchmark -e include -e lib -e examples -e script -i src --gcov-options '\-lp'"; + docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo && ./build.sh" || exit 1; + else + ./build.sh || exit 1; fi; + +- | + if [[ "$USE_DOCKER" == "TRUE" ]]; then + if [[ "$USE_OPENMP" == "TRUE" ]]; then + docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo/build && make test"; + elif [[ "$USE_GCOV" == "TRUE" ]]; then + docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo/build && make test"; + docker run -e TRAVIS=$TRAVIS -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "pip install cpp-coveralls && cd /root/repo/build && coveralls --verbose -b ./ -r ../ -i include -i src -x .cpp -x .hpp --gcov-options '\-lp'"; else - make check || exit 1; - if [[ "$USE_GCOV" == "TRUE" ]]; then coveralls -b lib -e test -e benchmark -e include -e lib -e examples -e script -i src --gcov-options '\-lp' ; fi; + docker run -it -v $(pwd):/root/repo nnvmc/base /bin/bash -c "cd /root/repo/test && ./run.sh"; + fi; + else + if [[ "$USE_OPENMP" == "TRUE" ]]; then + cd build && make test && cd ..; + elif [[ "$USE_GCOV" == "TRUE" ]]; then + cd build && make test; + sudo pip install cpp-coveralls && coveralls --verbose -b ./ -r ../ -i include -i src -x .cpp -x .hpp --gcov-options '\-lp' && cd .. ; + else + cd test && ./run.sh && cd .. 
; + fi; fi; -- cat test/test-suite.log test/exe.log test/ut*/exe.log notifications: email: diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..6352f39 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,41 @@ +cmake_minimum_required (VERSION 3.5) +include(FindPackageHandleStandardArgs) + +project (ffnn LANGUAGES CXX VERSION 0.0.1) + +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_POSITION_INDEPENDENT_CODE ON) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${USER_CXX_FLAGS}") + +if (USE_COVERAGE) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage") +endif() + +# find packages +message(STATUS "Configured GSL_ROOT_DIR: ${GSL_ROOT_DIR}") + +if (USE_OPENMP) + find_package(OpenMP) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS} -DOPENMP") + message(STATUS "OPENMP_LIBRARY_PATHS: ${OpenMP_CXX_LIBRARY}") + message(STATUS "OPENMP_LIBRARIES: ${OpenMP_CXX_LIBRARIES}") +endif() + +find_package(GSL) +message(STATUS "GSL_INCLUDE_DIRS: ${GSL_INCLUDE_DIRS}") +message(STATUS "GSL_LIBRARIES: ${GSL_LIBRARIES}") + +message(STATUS "Configured CMAKE_CXX_COMPILER: ${CMAKE_CXX_COMPILER}") +message(STATUS "Configured CMAKE_CXX_FLAGS: ${CMAKE_CXX_FLAGS}") + +# set header / library paths +include_directories(include/ "${GSL_INCLUDE_DIRS}") # headers + +enable_testing() + +# continue with subdirectories +add_subdirectory(src) +add_subdirectory(test) +add_subdirectory(benchmark) +add_subdirectory(examples) diff --git a/Makefile.am b/Makefile.am deleted file mode 100644 index 57fb694..0000000 --- a/Makefile.am +++ /dev/null @@ -1,34 +0,0 @@ -ACLOCAL_AMFLAGS = -I m4 - -SUBDIRS = lib test benchmark examples - -# SUBDIRS targets - -lib: - cd lib && $(MAKE) - -test: lib - cd test && $(MAKE) - -benchmark: lib - cd benchmark && $(MAKE) - -examples: lib - cd examples && $(MAKE) - - -# Special targets - -include-links: - cd include && $(MAKE) include-links - -source-lists: - cd lib && $(MAKE) source-lists - -update-sources: 
include-links source-lists - - -run-benchmarks: benchmark - cd benchmark && $(MAKE) run-benchmarks - -.PHONY: lib test benchmark examples source-lists include-links update-sources run-benchmarks diff --git a/README.md b/README.md index e9103d0..dfd4aab 100644 --- a/README.md +++ b/README.md @@ -6,87 +6,52 @@ # FeedForwardNeuralNetwork -C++ Library for building and using a Feed Forward Neural Network. -It includes first and second derivatives in respect to the input values, and first derivatives in respect to the variational parameters. +C++ Library for building and using Feed Forward Neural Networks. +It includes first and second derivatives with respect to the input values, first derivatives with respect to the variational parameters +and mixed derivatives with respect to both input and variational parameters. To get you started, there is a user manual pdf in `doc/` and in `examples/` there are several basic examples. -Most subdirectories come with a `README.md` file, explaining the purpose and what you need to know. +In `test/` you can find the unit tests and benchmarking programs in `benchmark`. +Some subdirectories come with an own `README.md` file which provides further information. # Supported Systems Currently, we automatically test the library on Arch Linux (GCC 8) and MacOS (with clang as well as brewed GCC 8). -However, in principle any system with C++11 supporting compiler should work, at least if you manage to install all dependencies. +However, in principle any system with C++11 supporting compiler should work. +# Requirements -# Build the library - -Make sure you have a reasonably recent development version (>=2.3?) of the GSL library on your system. Furthermore, we rely on the Autotools build system and libtool. -Optionally, if you have valgrind installed on your system, it will be used to check for memory errors when running unittests. 
- -If you have the GSL librariy in non-standard paths or want to use custom compiler flags, copy a little script: - - `cp script/config_template.sh config.sh` - -Now edit `config.sh` to your needs and before proceeding run: - - `source config.sh` - -If you have the prerequisites, you may setup the build environment by using the following script in the top level directory: - - `./autogen.sh` - -Now you want to configure the build process for your platform by invoking: - - `./configure` - -Finally, you are ready to compile all the code files in our repository together, by: - - `make` or `make -jN` - -where N is the number of parallel threads used by make. Alternatively, you may use the following make targets to build only subparts of the project: +- CMake, to use our build process +- GNU Scientific Library (~2.3+) +- (optional) OpenMP, to use parallelized propagation (make sure that it is beneficial in your case!) +- (optional) valgrind, to run `./run.sh` in `test/` +- (optional) gperftools, to run `./run_prof.sh` in `benchmark/` +- (optional) pdflatex, to compile the tex file in `doc/` +- (optional) doxygen, to generate doxygen documentation in `doc/doxygen` - `make lib`, `make test`, `make benchmark`, `make examples` +# Build the library -As long as you changed, but didn't remove or add source files, it is sufficient to only run `make` again to rebuild. - -If you however removed old or added new code files under `src/`, you need to first update the source file lists and include links. Do so by invoking from root folder: - - `make update-sources` - -NOTE: All the subdirectories of test, benchmark and examples support calling `make` inside them to recompile local changes. 
- - - -# Installation - -To install the freshly built library and headers into the standard system paths, run (usually sudo is required): - `make install` - -If you however want to install the library under a custom path, before installing you have to use - `./configure --prefix=/your/absolute/path - - - -# Build options - -You may enable special compiler flags by using one or more of the following options after `configure`: +Copy the file `config_template.sh` to `config.sh`, edit it to your liking and then simply execute the command - `--enable-debug` : Enables flags (like \-g and \-O0) suitable for debugging + `./build.sh` - `--enable-coverage` : Enables flags to generate test coverage reports via gcov +Note that we build out-of-tree, so the compiled library and executable files can be found in the directories under `./build/`. - `--enable-profiling` : Enables flags to generate performance profiles for benchmarks +# First steps +You may want to read `doc/user_manual.pdf` to get a quick overview of the library's functionality. However, it is not guaranteed to be perfectly up-to-date and accurate. +Therefore, the best way to get your own code started is by studying the examples in `examples/`. See `examples/README.md` for further guidance. -## Multi-threading: OpenMP +# Multi-threading: OpenMP This library supports multi-threading computation with a shared memory paradigm, thanks to OpenMP. -To activate this feature use `--enable-openmp` at configuration. Currently it is not recommended to use this for most cases. +To activate this feature, set `USE_OPENMP=1` inside your config.sh, before building. It is recommended to use this only for larger networks. +You can fine tune performance by setting the `OMP_NUM_THREADS` environment variable. 
diff --git a/autogen.sh b/autogen.sh deleted file mode 100755 index 36c207e..0000000 --- a/autogen.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -autoreconf -fi || exit 1 diff --git a/benchmark/CMakeLists.txt b/benchmark/CMakeLists.txt new file mode 100644 index 0000000..4ae054b --- /dev/null +++ b/benchmark/CMakeLists.txt @@ -0,0 +1,6 @@ +include_directories(common/) +link_libraries(ffnn) + +add_executable(bench_actfs_derivs bench_actfs_derivs/main.cpp) +add_executable(bench_actfs_ffprop bench_actfs_ffprop/main.cpp) +add_executable(bench_nunits_ffprop bench_nunits_ffprop/main.cpp) diff --git a/benchmark/Makefile.am b/benchmark/Makefile.am deleted file mode 100644 index de4df3b..0000000 --- a/benchmark/Makefile.am +++ /dev/null @@ -1,6 +0,0 @@ -SUBDIRS = bench_actfs_derivs bench_actfs_ffprop - -run-benchmarks: - for dir in $(SUBDIRS); do \ - if [[ "$$dir" == *"bench_"* ]]; then $(MAKE) -C $$dir run-benchmark; fi; \ - done diff --git a/benchmark/README.md b/benchmark/README.md index 8b2a837..e0f6c5b 100644 --- a/benchmark/README.md +++ b/benchmark/README.md @@ -11,28 +11,33 @@ Currently there are the following benchmarks: `bench_actfs_ffprop`: Benchmark of a FFNN's propagation for various hidden layer activation functions. + `bench_nunits_ffprop`: Benchmark of a FFNN's propagation for different sizes of input and hidden layers. + # Using the benchmarks -Enter the desired benchmark's directory and execute: - `make run-benchmark` +Just provide the script `run.sh` the desired benchmark's name, e.g.: + `./run.sh bench_actfs_ffprop` -Instead you may also run all benchmarks together by calling from root or from top benchmark folder: - `make run-benchmarks` +Alternatively, you can run all benchmarks sequentially by calling: + `./run_all.sh` -Each benchmark will write the result into a file `benchmark_new.out`. For visualization execute the plot script: +The benchmark results will be written to a file named `benchmark_new.out` under the respective benchmark folder. 
+You may visualize the result by entering that directory and using: `python plot.py benchmark_new.out` To let the plot compare the new result versus an older one, you have to provide the old output file like: `python plot.py benchmark_old.out benchmark_new.out`. -You may also change new/old to more meaningful labels, anything like benchmark_*.out is allowed (except extra _ or . characters). +You may also change new/old to more meaningful labels, anything like benchmark_*.out is allowed (except extra _ or . characters). The +provided labels will be used automatically to create the plot legends. # Profiling -If you want to use the benchmarks for profiling, recompile the library and benchmarks after configuring - `./configure --enable-profiling` +If you want to performance profile the library under execution of a benchmark, +you just need to provide gperftools's libprofiler.so library to `run_prof.sh` as second argument, e.g.: + `./run_prof.sh bench_actfs_ffprop /usr/lib/libprofiler.so` -Then execute a benchmark via make (!) and afterwards view the profile with: - `pprof --text exe exe.prof` +Note that this script does not save any benchmark results. +Also note that for profiling you might want to avoid LTO flags when building the library, to avoid cryptic LTO chunk names in the profile. 
diff --git a/benchmark/bench.am b/benchmark/bench.am deleted file mode 100644 index e2464de..0000000 --- a/benchmark/bench.am +++ /dev/null @@ -1,27 +0,0 @@ -AM_LDFLAGS += -lffnn -AM_CPPFLAGS += -I../common - -if !DEBUG - AM_CXXFLAGS += $(OPTFLAGS) -endif - -if PROFILING - AM_CXXFLAGS += $(PROF_CFLAGS) - AM_LDFLAGS += $(PROF_LFLAGS) -endif - -noinst_PROGRAMS = exe -exe_SOURCES = main.cpp - -if PROFILING -clean-local: - rm -f exe.prof* - -run-benchmark: exe - CPUPROFILE=exe.prof ./exe > benchmark_new.out - pprof --text exe exe.prof -else -run-benchmark: exe - ./exe > benchmark_new.out - cat benchmark_new.out -endif diff --git a/benchmark/bench_actfs_derivs/Makefile.am b/benchmark/bench_actfs_derivs/Makefile.am deleted file mode 100644 index 6f0c190..0000000 --- a/benchmark/bench_actfs_derivs/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../bench.am diff --git a/benchmark/bench_actfs_derivs/main.cpp b/benchmark/bench_actfs_derivs/main.cpp index 1b7d1b3..4bb4e00 100644 --- a/benchmark/bench_actfs_derivs/main.cpp +++ b/benchmark/bench_actfs_derivs/main.cpp @@ -2,18 +2,18 @@ #include #include -#include "ActivationFunctionManager.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/io/PrintUtilities.hpp" -#include "FFNNBenchmarks.cpp" +#include "FFNNBenchmarks.hpp" using namespace std; -void run_single_benchmark(const string &label, const string &actf_id, const double * const indata, const int neval, const int nruns, const bool flag_d1, const bool flag_d2, const bool flag_d3, const bool flag_fad) { +void run_single_benchmark(const string &label, const string &actf_id, const double * const xdata, const int neval, const int nruns, const bool flag_d1, const bool flag_d2, const bool flag_d3, const bool flag_fad) { pair result; const double time_scale = 1000000000.; //nanoseconds - result = sample_benchmark_actf_derivs(std_actf::provideActivationFunction(actf_id), indata, neval, nruns, flag_d1, flag_d2, flag_d3, flag_fad); 
+ result = sample_benchmark_actf_derivs(std_actf::provideActivationFunction(actf_id), xdata, neval, nruns, flag_d1, flag_d2, flag_d3, flag_fad); cout << label << ":" << setw(max(1, 11-(int)label.length())) << setfill(' ') << " " << result.first/neval*time_scale << " +- " << result.second/neval*time_scale << " nanoseconds" << endl; } @@ -24,7 +24,7 @@ int main (void) { const int nactfs = 8; const string actf_ids[nactfs] = {"LGS", "GSS", "ID", "TANS", "SIN", "RELU", "SELU", "SRLU"}; - double * const indata = new double[neval]; // 1d input data for actf bench + double * xdata = new double[neval]; // 1d input data for actf bench // generate some random input random_device rdev; @@ -33,7 +33,7 @@ int main (void) { rgen = mt19937_64(rdev()); rgen.seed(18984687); rd = uniform_real_distribution(-sqrt(3.), sqrt(3.)); // uniform with variance 1 - for (int i=0; i #include -#include "ActivationFunctionManager.hpp" -#include "PrintUtilities.hpp" -#include "FeedForwardNeuralNetwork.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/io/PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" -#include "FFNNBenchmarks.cpp" +#include "FFNNBenchmarks.hpp" using namespace std; -void run_single_benchmark(const string &label, FeedForwardNeuralNetwork * const ffnn, const double * const * const xdata, const int neval, const int nruns) { +void run_single_benchmark(const string &label, FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval, const int nruns) { pair result; const double time_scale = 1000000.; //microseconds @@ -29,8 +29,8 @@ int main (void) { const int nactfs = 8; const string actf_ids[nactfs] = {"LGS", "GSS", "ID", "TANS", "SIN", "RELU", "SELU", "SRLU"}; - double ** const xdata = new double*[neval]; // xndim input data for propagate bench - for (int i=0; i(-sqrt(3.), sqrt(3.)); // uniform with variance 1 - for (int i=0; i +#include +#include + +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include 
"ffnn/io/PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" + +#include "FFNNBenchmarks.hpp" + +using namespace std; + +void run_single_benchmark(const string &label, FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval, const int nruns) { + pair result; + const double time_scale = 1000000.; //microseconds + + result = sample_benchmark_FFPropagate(ffnn, xdata, neval, nruns); + cout << label << ":" << setw(max(1, 20-(int)label.length())) << setfill(' ') << " " << result.first/neval*time_scale << " +- " << result.second/neval*time_scale << " microseconds" << endl; +} + +int main (void) { + const int neval[3] = {50000, 1000, 20}; + const int nruns = 5; + + const int nhl = 2; + const int yndim = 1; + const int xndim[3] = {6, 24, 96}, nhu1[3] = {12, 48, 192}, nhu2[3] = {6, 24, 96}; + + int ndata[3], ndata_full = 0; + for (int i=0; i<3; ++i) { + ndata[i] = neval[i]*xndim[i]; + ndata_full += ndata[i]; + } + double * xdata = new double[ndata_full]; // xndim input data for propagate bench + + // generate some random input + random_device rdev; + mt19937_64 rgen; + uniform_real_distribution rd; + rgen = mt19937_64(rdev()); + rgen.seed(18984687); + rd = uniform_real_distribution(-sqrt(3.), sqrt(3.)); // uniform with variance 1 + for (int i=0; ipushHiddenLayer(nhu2[inet]); + ffnn->connectFFNN(); + ffnn->assignVariationalParameters(); + + cout << "FFPropagate benchmark with " << nruns << " runs of " << neval[inet] << " FF-Propagations, for a FFNN of shape " << xndim[inet] << "x" << nhu1[inet] << "x" << nhu2[inet] << "x" << yndim << " ." 
<< endl; + cout << "=========================================================================================" << endl << endl; + cout << "NN structure looks like:" << endl << endl; + printFFNNStructure(ffnn, true, 0); + cout << endl; + cout << "Benchmark results (time per propagation):" << endl; + + run_single_benchmark("f", ffnn, xdata+xoffset, neval[inet], nruns); + + ffnn->addFirstDerivativeSubstrate(); + run_single_benchmark("f+d1", ffnn, xdata+xoffset, neval[inet], nruns); + + ffnn->addSecondDerivativeSubstrate(); + run_single_benchmark("f+d1+d2", ffnn, xdata+xoffset, neval[inet], nruns); + + ffnn->addVariationalFirstDerivativeSubstrate(); + run_single_benchmark("f+d1+d2+vd1", ffnn, xdata+xoffset, neval[inet], nruns); + + /* these currently kill 16GB+ of memory on the largest nets */ + //ffnn->addCrossFirstDerivativeSubstrate(); + //run_single_benchmark("f+d1+d2+vd1+cd1", ffnn, xdata+xoffset, neval[inet], nruns); + + //ffnn->addCrossSecondDerivativeSubstrate(); + //run_single_benchmark("f+d1+d2+vd1+cd1+cd2", ffnn, xdata+xoffset, neval[inet], nruns); + + cout << "=========================================================================================" << endl << endl << endl; + + delete ffnn; + xoffset += ndata[inet]; + } + + delete [] xdata; + return 0; +} + diff --git a/benchmark/bench_nunits_ffprop/plot.py b/benchmark/bench_nunits_ffprop/plot.py new file mode 100644 index 0000000..30d7210 --- /dev/null +++ b/benchmark/bench_nunits_ffprop/plot.py @@ -0,0 +1,115 @@ +from pylab import * + +class benchmark_nunits_ffprop: + + def __init__(self, filename, label): + self.label = label + self.data = {} + + bnew = True + with open(filename) as bmfile: + for line in bmfile: + + lsplit = line.split() + + if len(lsplit) < 5: + continue + + if lsplit[0] == 'FFPropagate': + if not bnew: + self.data[net_shape] = net_data # store previous net's data + + net_shape = lsplit[13] + net_data = {} + bnew = False + continue + + if lsplit[0][0:2] == 'f:' or lsplit[0][0:2] == 
'f+': + net_data[lsplit[0][:-1]] = (float(lsplit[1]), float(lsplit[3])) + + self.data[net_shape] = net_data # store last net's data + + +def plot_compare_nets(benchmark_list, **kwargs): + nbm = len(benchmark_list) + xlabels = benchmark_list[0].data[list(benchmark_list[0].data.keys())[0]].keys() # get the xlabels from first entry in data dict + + fig = figure() + fig.suptitle('FFPropagate benchmark, comparing different net sizes',fontsize=14) + + itp=0 + for benchmark in benchmark_list: + + itp+=1 + ax = fig.add_subplot(nbm, 1, itp) + for net in benchmark.data.keys(): + values = [v[0] for v in benchmark.data[net].values()] + errors = [v[1] for v in benchmark.data[net].values()] + ax.errorbar(xlabels, values, xerr=None, yerr=errors, **kwargs) + + ax.set_yscale('log') + ax.set_title(benchmark.label + ' version') + ax.set_ylabel('Time per propagation [$\mu s$]') + ax.legend(benchmark.data.keys()) + + return fig + + +def plot_compare_runs(benchmark_list, net_list, width = 0.8, **kwargs): + nbm = len(benchmark_list)-1 + if nbm <= 0: + print('Error: Not enough benchmarks for comparison plot.') + return None + + bwidth = width/float(nbm) + nnet = len(net_list) + if nbm > 1: + ind = arange(len(benchmark_list[0].data[net_list[0]]), 0, -1) + else: + ind = arange(len(benchmark_list[0].data[net_list[0]]), 0, -1) - 0.5*bwidth + xlabels = benchmark_list[0].data[net_list[0]].keys() + + fig = figure() + fig.suptitle('FFPropagate benchmark, comparing against ' + benchmark_list[0].label + ' version',fontsize=14) + + itp = 0 + for ita, net in enumerate(net_list): + + itp+=1 + ax = fig.add_subplot(nnet, 1, itp) + scales = array([100./v[0] for v in benchmark_list[0].data[net].values()]) # we will normalize data to the first benchmark's results + for itb, benchmark in enumerate(benchmark_list[1:]): + values = array([v[0] for v in benchmark.data[net].values()])*scales + errors = array([v[1] for v in benchmark.data[net].values()])*scales + rects = ax.barh(ind - itb*bwidth, values, bwidth, 
xerr=errors, **kwargs) + for rect in rects: + ax.text(1., rect.get_y() + rect.get_height()/2., '%d' % int(rect.get_width()), ha='left', va='center', fontsize=8) + + ax.set_title(net + ' net') + if ita==len(net_list)-1: + ax.set_xlabel('Time per propagation [%]') + ax.set_xlim([0,200]) + ax.set_yticks(ind - 0.5*(nbm-1)*bwidth) + ax.set_yticklabels(xlabels) + ax.legend([benchmark.label for benchmark in benchmark_list[1:]]) + + return fig + +# Script + +benchmark_list = [] +for benchmark_file in sys.argv[1:]: + try: + benchmark = benchmark_nunits_ffprop(benchmark_file, benchmark_file.split('_')[1].split('.')[0]) + benchmark_list.append(benchmark) + except(OSError): + print("Warning: Couldn't load benchmark file " + benchmark_file + "!") + +if len(benchmark_list)<1: + print("Error: Not even one benchmark loaded!") +else: + fig1 = plot_compare_nets(benchmark_list, fmt='o--') + if len(benchmark_list)>1: + fig2 = plot_compare_runs(benchmark_list, ['6x12x6x1', '24x48x24x1', '96x192x96x1']) + +show() diff --git a/benchmark/common/FFNNBenchmarks.cpp b/benchmark/common/FFNNBenchmarks.hpp similarity index 61% rename from benchmark/common/FFNNBenchmarks.cpp rename to benchmark/common/FFNNBenchmarks.hpp index 3074769..42bbe6e 100644 --- a/benchmark/common/FFNNBenchmarks.cpp +++ b/benchmark/common/FFNNBenchmarks.hpp @@ -2,25 +2,23 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "Timer.cpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "Timer.hpp" -double benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double * const * const xdata, const int neval) { - Timer * const timer = new Timer(); - double time; +double benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval) { + Timer timer(1.); + const int ninput = ffnn->getNInput(); - timer->reset(); + timer.reset(); for (int i=0; isetInput(xdata[i]); + ffnn->setInput(xdata+i*ninput); ffnn->FFPropagate(); } - time = timer->elapsed(); 
- delete timer; - return time; + return timer.elapsed(); } -std::pair sample_benchmark_FFPropagate(FeedForwardNeuralNetwork * ffnn, const double * const * const xdata, const int neval, const int nruns) { +std::pair sample_benchmark_FFPropagate(FeedForwardNeuralNetwork * const ffnn, const double * const xdata, const int neval, const int nruns) { double times[nruns]; double mean = 0., err = 0.; @@ -37,33 +35,30 @@ std::pair sample_benchmark_FFPropagate(FeedForwardNeuralNetwork return result; } -double benchmark_actf_derivs(ActivationFunctionInterface * actf, const double * const xdata, const int neval, const bool flag_d1 = true, const bool flag_d2 = true, const bool flag_d3 = true, const bool flag_fad = true) { - Timer * const timer = new Timer(); - double time, v, v1d, v2d, v3d; +double benchmark_actf_derivs(ActivationFunctionInterface * const actf, const double * const xdata, const int neval, const bool flag_d1 = true, const bool flag_d2 = true, const bool flag_d3 = true, const bool flag_fad = true) { + Timer timer(1.); + double v=0., v1d=0., v2d=0., v3d=0.; if (flag_fad) { - timer->reset(); + timer.reset(); for (int i=0; ifad(xdata[i], v, v1d, v2d, v3d, flag_d1, flag_d2, flag_d3); } - time = timer->elapsed(); + return timer.elapsed(); } else { - timer->reset(); + timer.reset(); for (int i=0; if(xdata[i]); v1d = flag_d1 ? actf->f1d(xdata[i]) : 0.0; v2d = flag_d2 ? actf->f2d(xdata[i]) : 0.0; v3d = flag_d3 ? 
actf->f3d(xdata[i]) : 0.0; } - time = timer->elapsed(); + return timer.elapsed(); } - - delete timer; - return time; } -std::pair sample_benchmark_actf_derivs(ActivationFunctionInterface * actf, const double * const xdata, const int neval, const int nruns, const bool flag_d1 = true, const bool flag_d2 = true, const bool flag_d3 = true, const bool flag_fad = true) { +std::pair sample_benchmark_actf_derivs(ActivationFunctionInterface * const actf, const double * const xdata, const int neval, const int nruns, const bool flag_d1 = true, const bool flag_d2 = true, const bool flag_d3 = true, const bool flag_fad = true) { double times[nruns]; double mean = 0., err = 0.; diff --git a/benchmark/common/Timer.cpp b/benchmark/common/Timer.hpp similarity index 83% rename from benchmark/common/Timer.cpp rename to benchmark/common/Timer.hpp index 25c0e7f..bf12c5e 100644 --- a/benchmark/common/Timer.cpp +++ b/benchmark/common/Timer.hpp @@ -4,7 +4,7 @@ class Timer { public: - explicit Timer(const double scale = 1.) : _beg(_clock::now()), _scale(scale) {} + explicit Timer(const double scale) : _beg(_clock::now()), _scale(scale) {} void reset() { _beg = _clock::now(); } double elapsed() const { return _scale * std::chrono::duration_cast<_second>(_clock::now() - _beg).count(); } diff --git a/benchmark/run.sh b/benchmark/run.sh new file mode 100755 index 0000000..f220260 --- /dev/null +++ b/benchmark/run.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +if [ "$1" = "" ]; then + echo "Expected the name of the benchmark to run as first argument." +else + bench=$1 + outfile="$(pwd)/${bench}/benchmark_new.out" + cd ../build/benchmark/ + echo + echo "Running benchmark ${bench}..." + ./${bench} > ${outfile} + cat ${outfile} + echo +fi diff --git a/benchmark/run_all.sh b/benchmark/run_all.sh new file mode 100755 index 0000000..f2e1477 --- /dev/null +++ b/benchmark/run_all.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +for bench in bench_*; do + echo "Running ${bench} ..." 
+ ./run.sh $bench +done diff --git a/benchmark/run_prof.sh b/benchmark/run_prof.sh new file mode 100755 index 0000000..6460068 --- /dev/null +++ b/benchmark/run_prof.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +if [ "$1" = "" ]; then + echo "Expected the name of the benchmark to run as first argument." +elif [ "$2" = "" ]; then + echo "Expected the path of libprofiler.so as second argument." +else + bench=$1 + lprof=$2 + cd ../build/benchmark/ + echo + echo "Running benchmark ${bench}..." + LD_PRELOAD=${lprof} CPUPROFILE=${bench}.prof CPUPROFILE_FREQUENCY=10000 CPUPROFILE_REALTIME=1 ./${bench} + pprof --text ${bench} ${bench}.prof + echo +fi diff --git a/build.sh b/build.sh new file mode 100755 index 0000000..e0335e4 --- /dev/null +++ b/build.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +. ./config.sh +mkdir -p build && cd build +cmake -DCMAKE_CXX_COMPILER="${CXX_COMPILER}" -DUSER_CXX_FLAGS="${CXX_FLAGS}" -DUSE_COVERAGE="${USE_COVERAGE}" -DUSE_OPENMP="${USE_OPENMP}" -DGSL_ROOT_DIR="${GSL_ROOT}" .. + +if [ "$1" = "" ]; then + make -j$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null) +else + make -j$1 +fi diff --git a/config_template.sh b/config_template.sh new file mode 100755 index 0000000..2b17384 --- /dev/null +++ b/config_template.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +#C++ compiler +CXX_COMPILER="g++" + +# C++ flags +CXX_FLAGS="-O3 -flto -Wall -Wno-unused-function" + +# add coverage flags +USE_COVERAGE=0 + +# use OpenMP for parallel propagation +USE_OPENMP=0 + +# GNU Scientific Library +GSL_ROOT="" # provide a path if not in system location diff --git a/configure.ac b/configure.ac deleted file mode 100644 index 6847995..0000000 --- a/configure.ac +++ /dev/null @@ -1,153 +0,0 @@ -# -*- Autoconf -*- -# Process this file with autoconf to produce a configure script. 
- -AC_PREREQ([2.69]) -AC_INIT([FFNN], [0.1], [ithanil@mail.uni-paderborn.de]) -: ${CXXFLAGS=""} # delete default optimization user flags -AC_CONFIG_HEADERS([config.h]) -AC_CONFIG_MACRO_DIRS([m4]) -AC_LANG([C++]) - -AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects]) - -# --- Options - -# Add debug support -AX_CHECK_ENABLE_DEBUG() -AM_CONDITIONAL(DEBUG, test x"$ax_enable_debug" = x"yes") -AM_COND_IF(DEBUG, - AC_DEFINE(DEBUG, 1, [Define to 0 if this is a release build]), - AC_DEFINE(DEBUG, 0, [Define to 1 or higher if this is a debug build])) - - -# Add coverage support (gcov) -AC_PROG_CXX -AX_COVERAGE() -AM_CONDITIONAL(COVERAGE, test "x$enable_coverage" = x"yes") -AM_COND_IF(COVERAGE, - AC_DEFINE(COVERAGE, 1, [Define to 0 if you don't need code coverage statistics.]), - AC_DEFINE(COVERAGE, 0, [Define to 1 or higher if you want to generate code coverage statistics.])) - - -# Add profiling support (gperftools) -AC_ARG_ENABLE(profiling, - AS_HELP_STRING( - [--enable-profiling], - [enable profiling, default: no]), - [case "${enableval}" in - yes) profiling=true ;; - no) profiling=false ;; - esac], - [profiling=false]) -AM_CONDITIONAL(PROFILING, test x"$profiling" = x"true") -AM_COND_IF(PROFILING, - AC_DEFINE(PROFILING, 1, [Define to 0 if you don't need performance profiling.]), - AC_DEFINE(PROFILING, 0, [Define to 1 or higher if you want to generate performance profiles.])) - - -# Add OpenMP support -AC_ARG_ENABLE(openmp, - AS_HELP_STRING( - [--enable-openmp], - [enable openmp, default: no]), - [case "${enableval}" in - yes) openmp=true ;; - no) openmp=false ;; - esac], - [openmp=false]) -AM_CONDITIONAL(OPENMP, test x"$openmp" = x"true") -AM_COND_IF(OPENMP, - AC_DEFINE(OPENMP, 1, [Define to 0 if you don't want to use threading via OpenMP.]), - AC_DEFINE(OPENMP, 0, [Define to 1 or higher if you want to enable OpenMP threading.])) - - - -# --- Checks - -AM_PROG_AR - -LT_INIT # use libtool - -# Checks for programs. 
-AC_PROG_AWK -AC_PROG_INSTALL -AC_PROG_MAKE_SET -AC_PROG_CC -AC_PROG_CPP -AC_PROG_CXX -AC_PROG_CXXCPP -AC_PROG_LN_S -AC_PROG_RANLIB - -# check for c++11 support (and add compiler flag) -AX_CXX_COMPILE_STDCXX(11, noext, mandatory) - -# check for valgrind -AX_VALGRIND_CHECK - - -# Checks for libraries. - -# Checks for header files. - -AC_CHECK_HEADERS([gsl/gsl_vector.h gsl/gsl_matrix.h gsl/gsl_blas.h gsl/gsl_multifit_nlinear.h], [ ], [ AC_MSG_ERROR([Unable to find one or more required GSL headers!]) ]) - -# Checks for typedefs, structures, and compiler characteristics. -AC_CHECK_HEADER_STDBOOL -AC_C_INLINE -AC_TYPE_SIZE_T - -# Checks for library functions. -AC_CHECK_FUNCS([pow sqrt]) - - - -# --- Compiler flags - -AC_SUBST([DEBUGFLAGS], ["-g -O0"]) -AM_COND_IF(PROFILING, AC_SUBST([OPTFLAGS], ["-O3"]), AC_SUBST([OPTFLAGS], ["-O3 -flto"])) -AC_SUBST([PROF_CFLAGS], ["-g -DWITHGPERFTOOLS"]) -AC_SUBST([PROF_LFLAGS], ["-lprofiler"]) -AC_SUBST([OMP_CFLAGS], ["-DOPENMP -fopenmp"]) -AC_SUBST([OMP_LFLAGS], ["-lomp"]) - -AC_SUBST([AM_CXXFLAGS], ["-Wall"]) -AC_SUBST([AM_CPPFLAGS], ["-I$(pwd)/include"]) -AC_SUBST([AM_LDFLAGS], ["-L$(pwd)/lib -lgsl -lgslcblas"]) - - -# --- Makefiles - -# output of ./script/generate_ac_config_files.sh -AC_CONFIG_FILES([ -lib/Makefile -include/Makefile -test/ut10/Makefile -test/ut1/Makefile -test/ut8/Makefile -test/ut2/Makefile -test/ut5/Makefile -test/ut9/Makefile -test/ut3/Makefile -test/ut7/Makefile -test/Makefile -test/ut6/Makefile -test/ut4/Makefile -examples/ex9/Makefile -examples/ex7/Makefile -examples/ex1/Makefile -examples/ex6/Makefile -examples/ex4/Makefile -examples/ex5/Makefile -examples/ex2/Makefile -examples/Makefile -examples/ex8/Makefile -examples/ex3/Makefile -examples/ex10/Makefile -Makefile -benchmark/bench_actfs_derivs/Makefile -benchmark/Makefile -benchmark/bench_actfs_ffprop/Makefile -]) - -AC_OUTPUT diff --git a/doc/doxygen/doxygen.conf b/doc/doxygen/doxygen.conf index e9f5ca3..aac79e3 100644 --- a/doc/doxygen/doxygen.conf +++ 
b/doc/doxygen/doxygen.conf @@ -791,7 +791,8 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = "../../src" \ +INPUT = "../../include/ffnn" \ + "../../src" \ "doxygen_stl.cpp" # This tag can be used to specify the character encoding of the source files diff --git a/doc/user_manual.pdf b/doc/user_manual.pdf index 4c2e87b..3272815 100644 Binary files a/doc/user_manual.pdf and b/doc/user_manual.pdf differ diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt new file mode 100644 index 0000000..7dfe385 --- /dev/null +++ b/examples/CMakeLists.txt @@ -0,0 +1,12 @@ +link_libraries(ffnn) + +add_executable(ex1.exe ex1/main.cpp) +add_executable(ex2.exe ex2/main.cpp) +add_executable(ex3.exe ex3/main.cpp) +add_executable(ex4.exe ex4/main.cpp) +add_executable(ex5.exe ex5/main.cpp) +add_executable(ex6.exe ex6/main.cpp) +add_executable(ex7.exe ex7/main.cpp) +add_executable(ex8.exe ex8/main.cpp) +add_executable(ex9.exe ex9/main.cpp) +add_executable(ex10.exe ex10/main.cpp) diff --git a/examples/Makefile.am b/examples/Makefile.am deleted file mode 100644 index 3ece124..0000000 --- a/examples/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -SUBDIRS = ex1 ex2 ex3 ex4 ex5 ex6 ex7 ex8 ex9 ex10 diff --git a/examples/README.md b/examples/README.md index 73aa4ad..974ff41 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,9 +1,10 @@ # LEGEND OF THE EXAMPLES -Make sure the examples are compiled, by running `make examples` in the project root folder. -Execute an example by switching into one of the example folders and running `./exe`. -Some examples might also contain a `plot.py` script to show a plot. -Run it after the exe by `python plot.py` (requires matplotlib). +Make sure the examples are compiled, by running `./build.sh` in the project root folder. +Execute an example by switching into one of the example folders and running `./run.sh`. 
+Note that the actual example executables reside inside the `build/examples/` folder under the project's root. +Some examples might also contain a `plot.py` script to show a plot. It gets called automatically +by `./run.sh` after the executable has terminated, but requires python with matplotlib. ## Example 1 @@ -59,6 +60,7 @@ Run it after the exe by `python plot.py` (requires matplotlib). `ex9/`: use NNTrainer(GSL) to make FFNN fit a gaussian + ## Example 10 `ex10/`: use a FeatureMapLayer to fit more easily a gaussian of x-y-distance diff --git a/examples/ex1/Makefile.am b/examples/ex1/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex1/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex1/main.cpp b/examples/ex1/main.cpp index 9303eaf..2f997aa 100644 --- a/examples/ex1/main.cpp +++ b/examples/ex1/main.cpp @@ -2,8 +2,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" int main() { diff --git a/examples/ex1/run.sh b/examples/ex1/run.sh new file mode 100755 index 0000000..315481f --- /dev/null +++ b/examples/ex1/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd ../../build/examples +./ex1.exe diff --git a/examples/ex10/Makefile.am b/examples/ex10/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex10/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex10/main.cpp b/examples/ex10/main.cpp index 9c11f4c..0d40080 100644 --- a/examples/ex10/main.cpp +++ b/examples/ex10/main.cpp @@ -2,7 +2,7 @@ #include #include -#include "NNTrainerGSL.hpp" +#include "ffnn/train/NNTrainerGSL.hpp" /* diff --git a/examples/ex10/plot.py b/examples/ex10/plot.py index 82f328c..56af7b8 100644 --- a/examples/ex10/plot.py +++ b/examples/ex10/plot.py @@ -22,11 +22,12 @@ def f_d1_2(y): def f_d2_2(y): return f_d1_2(y) * (2*(x0-y) - 2*y) - 4.0 * f_v_2(y) +prefix 
= '../../build/examples/' fnames = ['v_0_0','v_1_0', 'd1_0_0', 'd1_1_0', 'd2_0_0', 'd2_1_0'] files = {} for fname in fnames: - files[fname + '_' + 'NN'] = './' + fname + '.txt' + files[fname + '_' + 'NN'] = prefix + fname + '.txt' data = {} for file in files: diff --git a/examples/ex10/run.sh b/examples/ex10/run.sh new file mode 100755 index 0000000..7365751 --- /dev/null +++ b/examples/ex10/run.sh @@ -0,0 +1,6 @@ +#!/bin/sh +( +cd ../../build/examples +./ex10.exe +) +python plot.py diff --git a/examples/ex2/Makefile.am b/examples/ex2/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex2/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex2/main.cpp b/examples/ex2/main.cpp index df7d0a7..1c056ae 100644 --- a/examples/ex2/main.cpp +++ b/examples/ex2/main.cpp @@ -2,8 +2,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" int main() { diff --git a/examples/ex2/run.sh b/examples/ex2/run.sh new file mode 100755 index 0000000..d2054c1 --- /dev/null +++ b/examples/ex2/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd ../../build/examples +./ex2.exe diff --git a/examples/ex3/Makefile.am b/examples/ex3/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex3/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex3/main.cpp b/examples/ex3/main.cpp index 3117b70..65ba533 100644 --- a/examples/ex3/main.cpp +++ b/examples/ex3/main.cpp @@ -2,8 +2,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" diff --git a/examples/ex3/run.sh b/examples/ex3/run.sh new file mode 100755 index 0000000..3b42d77 --- /dev/null +++ b/examples/ex3/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd ../../build/examples +./ex3.exe diff --git 
a/examples/ex4/Makefile.am b/examples/ex4/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex4/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex4/main.cpp b/examples/ex4/main.cpp index 62a1434..7fefa06 100644 --- a/examples/ex4/main.cpp +++ b/examples/ex4/main.cpp @@ -2,8 +2,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" diff --git a/examples/ex4/run.sh b/examples/ex4/run.sh new file mode 100755 index 0000000..3e7aba3 --- /dev/null +++ b/examples/ex4/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd ../../build/examples +./ex4.exe diff --git a/examples/ex5/Makefile.am b/examples/ex5/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex5/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex5/main.cpp b/examples/ex5/main.cpp index 97a7f63..dae97fb 100644 --- a/examples/ex5/main.cpp +++ b/examples/ex5/main.cpp @@ -2,8 +2,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" diff --git a/examples/ex5/run.sh b/examples/ex5/run.sh new file mode 100755 index 0000000..943d004 --- /dev/null +++ b/examples/ex5/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd ../../build/examples +./ex5.exe diff --git a/examples/ex6/Makefile.am b/examples/ex6/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex6/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex6/main.cpp b/examples/ex6/main.cpp index 3f46dbe..2b43483 100644 --- a/examples/ex6/main.cpp +++ b/examples/ex6/main.cpp @@ -2,8 +2,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" 
diff --git a/examples/ex6/run.sh b/examples/ex6/run.sh new file mode 100755 index 0000000..0a25c76 --- /dev/null +++ b/examples/ex6/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd ../../build/examples +./ex6.exe diff --git a/examples/ex7/Makefile.am b/examples/ex7/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex7/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex7/main.cpp b/examples/ex7/main.cpp index f9d49b0..5df548f 100644 --- a/examples/ex7/main.cpp +++ b/examples/ex7/main.cpp @@ -2,8 +2,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" @@ -80,10 +80,9 @@ int main() { writePlotFile(ffnn, base_input, input_i, output_i, min, max, npoints, "getFirstDerivative", "v1d.txt"); writePlotFile(ffnn, base_input, input_i, output_i, min, max, npoints, "getSecondDerivative", "v2d.txt"); + cout << "Done! In the files v.txt, v1d.txt, and v2d.txt we stored the values, and you can use any software you like to plot them (perhaps gnuplot?)." << endl << endl; - cout << "Done! In the files v.txt, v1d.txt, and v2d.txt we stored the values, and you can use any software you like to plot them (perhaps gnuplot?)."; + cout << "Note that the executable was run within the build/examples/ directory, so you have to look there to find the mentioned output files." 
<< endl; - - cout << endl << endl; return 0; } diff --git a/examples/ex7/run.sh b/examples/ex7/run.sh new file mode 100755 index 0000000..d4186f5 --- /dev/null +++ b/examples/ex7/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh +cd ../../build/examples +./ex7.exe diff --git a/examples/ex8/Makefile.am b/examples/ex8/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex8/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex8/main.cpp b/examples/ex8/main.cpp index b330c22..05d77f7 100644 --- a/examples/ex8/main.cpp +++ b/examples/ex8/main.cpp @@ -4,8 +4,8 @@ #include #include -#include "PrintUtilities.hpp" -#include "FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" diff --git a/examples/ex8/run.sh b/examples/ex8/run.sh new file mode 100755 index 0000000..708394e --- /dev/null +++ b/examples/ex8/run.sh @@ -0,0 +1,4 @@ +#!/bin/sh +cp stored_ffnn.txt ../../build/examples/ +cd ../../build/examples +./ex8.exe diff --git a/examples/ex9/Makefile.am b/examples/ex9/Makefile.am deleted file mode 100644 index 7b3221b..0000000 --- a/examples/ex9/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../example.am diff --git a/examples/ex9/main.cpp b/examples/ex9/main.cpp index 9d9556b..8f985a9 100644 --- a/examples/ex9/main.cpp +++ b/examples/ex9/main.cpp @@ -2,7 +2,7 @@ #include #include -#include "NNTrainerGSL.hpp" +#include "ffnn/train/NNTrainerGSL.hpp" /* diff --git a/examples/ex9/plot.py b/examples/ex9/plot.py index e37a506..02d65f7 100644 --- a/examples/ex9/plot.py +++ b/examples/ex9/plot.py @@ -3,11 +3,12 @@ gauss_a = 1.0 gauss_b = 0 +prefix = '../../build/examples/' fnames = ['v_0_0','d1_0_0','d2_0_0'] files = {} for fname in fnames: - files[fname + '_' + 'NN'] = './' + fname + '.txt' + files[fname + '_' + 'NN'] = prefix + fname + '.txt' data = {} for file in files: diff --git a/examples/ex9/run.sh b/examples/ex9/run.sh new file mode 100755 index 0000000..973b4b4 --- 
/dev/null +++ b/examples/ex9/run.sh @@ -0,0 +1,6 @@ +#!/bin/sh +( +cd ../../build/examples +./ex9.exe +) +python plot.py diff --git a/examples/example.am b/examples/example.am deleted file mode 100644 index 2cd421f..0000000 --- a/examples/example.am +++ /dev/null @@ -1,8 +0,0 @@ -AM_LDFLAGS += -lffnn - -if !DEBUG - AM_CXXFLAGS += $(OPTFLAGS) -endif - -noinst_PROGRAMS = exe -exe_SOURCES = main.cpp diff --git a/include/ActivationFunctionInterface.hpp b/include/ActivationFunctionInterface.hpp deleted file mode 120000 index 75854ab..0000000 --- a/include/ActivationFunctionInterface.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/ActivationFunctionInterface.hpp \ No newline at end of file diff --git a/include/ActivationFunctionManager.hpp b/include/ActivationFunctionManager.hpp deleted file mode 120000 index 9d2e63e..0000000 --- a/include/ActivationFunctionManager.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/ActivationFunctionManager.hpp \ No newline at end of file diff --git a/include/ActivationMapUnit.hpp b/include/ActivationMapUnit.hpp deleted file mode 120000 index b0b181e..0000000 --- a/include/ActivationMapUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/ActivationMapUnit.hpp \ No newline at end of file diff --git a/include/ActivationUnit.hpp b/include/ActivationUnit.hpp deleted file mode 120000 index 307e155..0000000 --- a/include/ActivationUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/ActivationUnit.hpp \ No newline at end of file diff --git a/include/EuclideanDistanceMap.hpp b/include/EuclideanDistanceMap.hpp deleted file mode 120000 index 3fce84a..0000000 --- a/include/EuclideanDistanceMap.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/EuclideanDistanceMap.hpp \ No newline at end of file diff --git a/include/EuclideanDistanceMapUnit.hpp b/include/EuclideanDistanceMapUnit.hpp deleted file mode 120000 index 3f22015..0000000 --- a/include/EuclideanDistanceMapUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/EuclideanDistanceMapUnit.hpp \ 
No newline at end of file diff --git a/include/EuclideanPairDistanceMap.hpp b/include/EuclideanPairDistanceMap.hpp deleted file mode 120000 index 927fa7f..0000000 --- a/include/EuclideanPairDistanceMap.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/EuclideanPairDistanceMap.hpp \ No newline at end of file diff --git a/include/EuclideanPairDistanceMapUnit.hpp b/include/EuclideanPairDistanceMapUnit.hpp deleted file mode 120000 index 2b83b6b..0000000 --- a/include/EuclideanPairDistanceMapUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/EuclideanPairDistanceMapUnit.hpp \ No newline at end of file diff --git a/include/FeatureMapLayer.hpp b/include/FeatureMapLayer.hpp deleted file mode 120000 index bb95934..0000000 --- a/include/FeatureMapLayer.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/FeatureMapLayer.hpp \ No newline at end of file diff --git a/include/FeatureMapUnit.hpp b/include/FeatureMapUnit.hpp deleted file mode 120000 index 982d82a..0000000 --- a/include/FeatureMapUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/FeatureMapUnit.hpp \ No newline at end of file diff --git a/include/FedActivationUnit.hpp b/include/FedActivationUnit.hpp deleted file mode 120000 index 7aeff95..0000000 --- a/include/FedActivationUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/FedActivationUnit.hpp \ No newline at end of file diff --git a/include/FedLayer.hpp b/include/FedLayer.hpp deleted file mode 120000 index 2ddbc09..0000000 --- a/include/FedLayer.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/layer/FedLayer.hpp \ No newline at end of file diff --git a/include/FedUnit.hpp b/include/FedUnit.hpp deleted file mode 120000 index fec5d8e..0000000 --- a/include/FedUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/FedUnit.hpp \ No newline at end of file diff --git a/include/FeedForwardNeuralNetwork.hpp b/include/FeedForwardNeuralNetwork.hpp deleted file mode 120000 index 6aaefcf..0000000 --- a/include/FeedForwardNeuralNetwork.hpp +++ /dev/null @@ -1 +0,0 @@ 
-../src/network/FeedForwardNeuralNetwork.hpp \ No newline at end of file diff --git a/include/FeederInterface.hpp b/include/FeederInterface.hpp deleted file mode 120000 index 07af2b2..0000000 --- a/include/FeederInterface.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feeder/FeederInterface.hpp \ No newline at end of file diff --git a/include/GaussianActivationFunction.hpp b/include/GaussianActivationFunction.hpp deleted file mode 120000 index d086109..0000000 --- a/include/GaussianActivationFunction.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/GaussianActivationFunction.hpp \ No newline at end of file diff --git a/include/IdentityActivationFunction.hpp b/include/IdentityActivationFunction.hpp deleted file mode 120000 index 2ee49c8..0000000 --- a/include/IdentityActivationFunction.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/IdentityActivationFunction.hpp \ No newline at end of file diff --git a/include/IdentityMap.hpp b/include/IdentityMap.hpp deleted file mode 120000 index dcd074d..0000000 --- a/include/IdentityMap.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/IdentityMap.hpp \ No newline at end of file diff --git a/include/IdentityMapUnit.hpp b/include/IdentityMapUnit.hpp deleted file mode 120000 index 5ec4cac..0000000 --- a/include/IdentityMapUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/IdentityMapUnit.hpp \ No newline at end of file diff --git a/include/InputLayer.hpp b/include/InputLayer.hpp deleted file mode 120000 index f1e0e6d..0000000 --- a/include/InputLayer.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/layer/InputLayer.hpp \ No newline at end of file diff --git a/include/InputUnit.hpp b/include/InputUnit.hpp deleted file mode 120000 index 7a8053c..0000000 --- a/include/InputUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/InputUnit.hpp \ No newline at end of file diff --git a/include/LogisticActivationFunction.hpp b/include/LogisticActivationFunction.hpp deleted file mode 120000 index a07a9f2..0000000 --- a/include/LogisticActivationFunction.hpp 
+++ /dev/null @@ -1 +0,0 @@ -../src/actf/LogisticActivationFunction.hpp \ No newline at end of file diff --git a/include/Makefile.am b/include/Makefile.am deleted file mode 100644 index 1c1aee1..0000000 --- a/include/Makefile.am +++ /dev/null @@ -1,5 +0,0 @@ -include-links: - rm -f *.hpp - ln -s ../src/*/*.hpp ./ - -.PHONY: include-links diff --git a/include/MultiDimStaticMap.hpp b/include/MultiDimStaticMap.hpp deleted file mode 120000 index 47c3af1..0000000 --- a/include/MultiDimStaticMap.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/MultiDimStaticMap.hpp \ No newline at end of file diff --git a/include/NNLayer.hpp b/include/NNLayer.hpp deleted file mode 120000 index faea353..0000000 --- a/include/NNLayer.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/layer/NNLayer.hpp \ No newline at end of file diff --git a/include/NNRay.hpp b/include/NNRay.hpp deleted file mode 120000 index 3cd3534..0000000 --- a/include/NNRay.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feeder/NNRay.hpp \ No newline at end of file diff --git a/include/NNTrainer.hpp b/include/NNTrainer.hpp deleted file mode 120000 index 787c2dd..0000000 --- a/include/NNTrainer.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/trainer/NNTrainer.hpp \ No newline at end of file diff --git a/include/NNTrainerGSL.hpp b/include/NNTrainerGSL.hpp deleted file mode 120000 index 40c9d07..0000000 --- a/include/NNTrainerGSL.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/trainer/NNTrainerGSL.hpp \ No newline at end of file diff --git a/include/NNTrainingConfig.hpp b/include/NNTrainingConfig.hpp deleted file mode 120000 index 6402999..0000000 --- a/include/NNTrainingConfig.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/trainer/NNTrainingConfig.hpp \ No newline at end of file diff --git a/include/NNTrainingData.hpp b/include/NNTrainingData.hpp deleted file mode 120000 index 4c282f8..0000000 --- a/include/NNTrainingData.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/trainer/NNTrainingData.hpp \ No newline at end of file diff --git a/include/NNUnit.hpp 
b/include/NNUnit.hpp deleted file mode 120000 index 1fb45d0..0000000 --- a/include/NNUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/NNUnit.hpp \ No newline at end of file diff --git a/include/NetworkLayer.hpp b/include/NetworkLayer.hpp deleted file mode 120000 index e5bc725..0000000 --- a/include/NetworkLayer.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/layer/NetworkLayer.hpp \ No newline at end of file diff --git a/include/NetworkUnit.hpp b/include/NetworkUnit.hpp deleted file mode 120000 index 3bed987..0000000 --- a/include/NetworkUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/NetworkUnit.hpp \ No newline at end of file diff --git a/include/OffsetUnit.hpp b/include/OffsetUnit.hpp deleted file mode 120000 index b09e0e4..0000000 --- a/include/OffsetUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/OffsetUnit.hpp \ No newline at end of file diff --git a/include/OneDimStaticMap.hpp b/include/OneDimStaticMap.hpp deleted file mode 120000 index 62c7050..0000000 --- a/include/OneDimStaticMap.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/OneDimStaticMap.hpp \ No newline at end of file diff --git a/include/OutputNNLayer.hpp b/include/OutputNNLayer.hpp deleted file mode 120000 index 8f139e8..0000000 --- a/include/OutputNNLayer.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/layer/OutputNNLayer.hpp \ No newline at end of file diff --git a/include/OutputNNUnit.hpp b/include/OutputNNUnit.hpp deleted file mode 120000 index 3dd2df5..0000000 --- a/include/OutputNNUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/OutputNNUnit.hpp \ No newline at end of file diff --git a/include/PairDifferenceMap.hpp b/include/PairDifferenceMap.hpp deleted file mode 120000 index 219f5e6..0000000 --- a/include/PairDifferenceMap.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/PairDifferenceMap.hpp \ No newline at end of file diff --git a/include/PairDifferenceMapUnit.hpp b/include/PairDifferenceMapUnit.hpp deleted file mode 120000 index 979b4d7..0000000 --- a/include/PairDifferenceMapUnit.hpp 
+++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/PairDifferenceMapUnit.hpp \ No newline at end of file diff --git a/include/PairSumMap.hpp b/include/PairSumMap.hpp deleted file mode 120000 index 87936cf..0000000 --- a/include/PairSumMap.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/PairSumMap.hpp \ No newline at end of file diff --git a/include/PairSumMapUnit.hpp b/include/PairSumMapUnit.hpp deleted file mode 120000 index 4cf64f8..0000000 --- a/include/PairSumMapUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feature_maps/PairSumMapUnit.hpp \ No newline at end of file diff --git a/include/PrintUtilities.hpp b/include/PrintUtilities.hpp deleted file mode 120000 index fe7bb2b..0000000 --- a/include/PrintUtilities.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/io/PrintUtilities.hpp \ No newline at end of file diff --git a/include/README.md b/include/README.md deleted file mode 100644 index 2930f78..0000000 --- a/include/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# include - -In this folder we maintain symbolic links to all headers in src, to allow flat include statements. - -If you add or remove source files in src, you should run the following from project root: - `make update-sources` - -This will update the lib and include folders to a changed src. 
diff --git a/include/ReLUActivationFunction.hpp b/include/ReLUActivationFunction.hpp deleted file mode 120000 index 351f3aa..0000000 --- a/include/ReLUActivationFunction.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/ReLUActivationFunction.hpp \ No newline at end of file diff --git a/include/SELUActivationFunction.hpp b/include/SELUActivationFunction.hpp deleted file mode 120000 index 1577ff5..0000000 --- a/include/SELUActivationFunction.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/SELUActivationFunction.hpp \ No newline at end of file diff --git a/include/SRLUActivationFunction.hpp b/include/SRLUActivationFunction.hpp deleted file mode 120000 index 4c112a4..0000000 --- a/include/SRLUActivationFunction.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/SRLUActivationFunction.hpp \ No newline at end of file diff --git a/include/SerializableComponent.hpp b/include/SerializableComponent.hpp deleted file mode 120000 index 2f3d283..0000000 --- a/include/SerializableComponent.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/serialize/SerializableComponent.hpp \ No newline at end of file diff --git a/include/ShifterScalerNNUnit.hpp b/include/ShifterScalerNNUnit.hpp deleted file mode 120000 index f62507f..0000000 --- a/include/ShifterScalerNNUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/ShifterScalerNNUnit.hpp \ No newline at end of file diff --git a/include/ShifterScalerUnit.hpp b/include/ShifterScalerUnit.hpp deleted file mode 120000 index 88632e9..0000000 --- a/include/ShifterScalerUnit.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/unit/ShifterScalerUnit.hpp \ No newline at end of file diff --git a/include/SineActivationFunction.hpp b/include/SineActivationFunction.hpp deleted file mode 120000 index 88a719b..0000000 --- a/include/SineActivationFunction.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/SineActivationFunction.hpp \ No newline at end of file diff --git a/include/SmartBetaGenerator.hpp b/include/SmartBetaGenerator.hpp deleted file mode 120000 index 166b4ec..0000000 --- 
a/include/SmartBetaGenerator.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feeder/SmartBetaGenerator.hpp \ No newline at end of file diff --git a/include/StaticFeeder.hpp b/include/StaticFeeder.hpp deleted file mode 120000 index e1e9450..0000000 --- a/include/StaticFeeder.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feeder/StaticFeeder.hpp \ No newline at end of file diff --git a/include/StringCodeUtilities.hpp b/include/StringCodeUtilities.hpp deleted file mode 120000 index 1c74df8..0000000 --- a/include/StringCodeUtilities.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/serialize/StringCodeUtilities.hpp \ No newline at end of file diff --git a/include/TanSigmoidActivationFunction.hpp b/include/TanSigmoidActivationFunction.hpp deleted file mode 120000 index b0dce7a..0000000 --- a/include/TanSigmoidActivationFunction.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/actf/TanSigmoidActivationFunction.hpp \ No newline at end of file diff --git a/include/VariableFeeder.hpp b/include/VariableFeeder.hpp deleted file mode 120000 index a535164..0000000 --- a/include/VariableFeeder.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feeder/VariableFeeder.hpp \ No newline at end of file diff --git a/include/WeightedFeeder.hpp b/include/WeightedFeeder.hpp deleted file mode 120000 index 7d4516e..0000000 --- a/include/WeightedFeeder.hpp +++ /dev/null @@ -1 +0,0 @@ -../src/feeder/WeightedFeeder.hpp \ No newline at end of file diff --git a/include/ffnn b/include/ffnn deleted file mode 120000 index 6a04314..0000000 --- a/include/ffnn +++ /dev/null @@ -1 +0,0 @@ -./ \ No newline at end of file diff --git a/src/actf/ActivationFunctionInterface.hpp b/include/ffnn/actf/ActivationFunctionInterface.hpp similarity index 98% rename from src/actf/ActivationFunctionInterface.hpp rename to include/ffnn/actf/ActivationFunctionInterface.hpp index 0feb448..77791c1 100644 --- a/src/actf/ActivationFunctionInterface.hpp +++ b/include/ffnn/actf/ActivationFunctionInterface.hpp @@ -1,7 +1,7 @@ #ifndef ACTIVATION_FUNCTION_INTERFACE #define 
ACTIVATION_FUNCTION_INTERFACE -#include "SerializableComponent.hpp" +#include "ffnn/serial/SerializableComponent.hpp" #include #include diff --git a/src/actf/ActivationFunctionManager.hpp b/include/ffnn/actf/ActivationFunctionManager.hpp similarity index 57% rename from src/actf/ActivationFunctionManager.hpp rename to include/ffnn/actf/ActivationFunctionManager.hpp index 2184931..5ee0c70 100644 --- a/src/actf/ActivationFunctionManager.hpp +++ b/include/ffnn/actf/ActivationFunctionManager.hpp @@ -2,15 +2,16 @@ #define ACTIVATION_FUNCTION_MANAGER -#include "ActivationFunctionInterface.hpp" -#include "IdentityActivationFunction.hpp" -#include "LogisticActivationFunction.hpp" -#include "GaussianActivationFunction.hpp" -#include "TanSigmoidActivationFunction.hpp" -#include "ReLUActivationFunction.hpp" -#include "SELUActivationFunction.hpp" -#include "SRLUActivationFunction.hpp" -#include "SineActivationFunction.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/IdentityActivationFunction.hpp" +#include "ffnn/actf/LogisticActivationFunction.hpp" +#include "ffnn/actf/GaussianActivationFunction.hpp" +#include "ffnn/actf/TanSigmoidActivationFunction.hpp" +#include "ffnn/actf/ReLUActivationFunction.hpp" +#include "ffnn/actf/SELUActivationFunction.hpp" +#include "ffnn/actf/SRLUActivationFunction.hpp" +#include "ffnn/actf/SineActivationFunction.hpp" +#include "ffnn/actf/ExponentialActivationFunction.hpp" #include #include @@ -26,6 +27,7 @@ namespace std_actf{ extern SELUActivationFunction selu_actf; extern SRLUActivationFunction srlu_actf; extern SineActivationFunction sin_actf; + extern ExponentialActivationFunction exp_actf; extern std::vector supported_actf; diff --git a/include/ffnn/actf/ExponentialActivationFunction.hpp b/include/ffnn/actf/ExponentialActivationFunction.hpp new file mode 100644 index 0000000..6504e90 --- /dev/null +++ b/include/ffnn/actf/ExponentialActivationFunction.hpp @@ -0,0 +1,33 @@ +#ifndef EXPONENTIAL_ACTIVATION_FUNCTION 
+#define EXPONENTIAL_ACTIVATION_FUNCTION + +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include + +class ExponentialActivationFunction: public ActivationFunctionInterface +{ +public: + // getters + ActivationFunctionInterface * getCopy(){return new ExponentialActivationFunction();} + std::string getIdCode(){return "EXP";} + + // input should be in the range [-1 : 1] -> mu=0 sigma=1/sqrt(3) + double getIdealInputMu(){return 0.;} + double getIdealInputSigma(){return 0.577350269189626;} + + // we can use default implementation for output mu/sigma + + // computation + double f(const double &in); + + double f1d(const double &in); + + double f2d(const double &in); + + double f3d(const double &in); + + void fad(const double &in, double &v, double &v1d, double &v2d, double &v3d, const bool flag_d1 = false, const bool flag_d2 = false, const bool flag_d3 = false); +}; + + +#endif diff --git a/src/actf/GaussianActivationFunction.hpp b/include/ffnn/actf/GaussianActivationFunction.hpp similarity index 95% rename from src/actf/GaussianActivationFunction.hpp rename to include/ffnn/actf/GaussianActivationFunction.hpp index 078af2f..8831f63 100644 --- a/src/actf/GaussianActivationFunction.hpp +++ b/include/ffnn/actf/GaussianActivationFunction.hpp @@ -1,7 +1,7 @@ #ifndef GAUSSIAN_ACTIVATION_FUNCTION #define GAUSSIAN_ACTIVATION_FUNCTION -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" #include diff --git a/src/actf/IdentityActivationFunction.hpp b/include/ffnn/actf/IdentityActivationFunction.hpp similarity index 95% rename from src/actf/IdentityActivationFunction.hpp rename to include/ffnn/actf/IdentityActivationFunction.hpp index 0a2cf17..9a99f38 100644 --- a/src/actf/IdentityActivationFunction.hpp +++ b/include/ffnn/actf/IdentityActivationFunction.hpp @@ -3,7 +3,7 @@ #include -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" class IdentityActivationFunction: public 
ActivationFunctionInterface { diff --git a/src/actf/LogisticActivationFunction.hpp b/include/ffnn/actf/LogisticActivationFunction.hpp similarity index 94% rename from src/actf/LogisticActivationFunction.hpp rename to include/ffnn/actf/LogisticActivationFunction.hpp index cea269f..178d4cd 100644 --- a/src/actf/LogisticActivationFunction.hpp +++ b/include/ffnn/actf/LogisticActivationFunction.hpp @@ -1,7 +1,7 @@ #ifndef LOGISTIC_ACTIVATION_FUNCTION #define LOGISTIC_ACTIVATION_FUNCTION -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" #include class LogisticActivationFunction: public ActivationFunctionInterface diff --git a/src/actf/ReLUActivationFunction.hpp b/include/ffnn/actf/ReLUActivationFunction.hpp similarity index 96% rename from src/actf/ReLUActivationFunction.hpp rename to include/ffnn/actf/ReLUActivationFunction.hpp index aff9943..0778bd0 100644 --- a/src/actf/ReLUActivationFunction.hpp +++ b/include/ffnn/actf/ReLUActivationFunction.hpp @@ -1,7 +1,7 @@ #ifndef RELU_ACTIVATION_FUNCTION #define RELU_ACTIVATION_FUNCTION -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" #include diff --git a/src/actf/SELUActivationFunction.hpp b/include/ffnn/actf/SELUActivationFunction.hpp similarity index 96% rename from src/actf/SELUActivationFunction.hpp rename to include/ffnn/actf/SELUActivationFunction.hpp index 289b107..3b70d5e 100644 --- a/src/actf/SELUActivationFunction.hpp +++ b/include/ffnn/actf/SELUActivationFunction.hpp @@ -1,7 +1,7 @@ #ifndef SELU_ACTIVATION_FUNCTION #define SELU_ACTIVATION_FUNCTION -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" #include class SELUActivationFunction: public ActivationFunctionInterface diff --git a/src/actf/SRLUActivationFunction.hpp b/include/ffnn/actf/SRLUActivationFunction.hpp similarity index 94% rename from src/actf/SRLUActivationFunction.hpp rename to 
include/ffnn/actf/SRLUActivationFunction.hpp index 88fc230..634c2b5 100644 --- a/src/actf/SRLUActivationFunction.hpp +++ b/include/ffnn/actf/SRLUActivationFunction.hpp @@ -1,7 +1,7 @@ #ifndef SRLU_ACTIVATION_FUNCTION #define SRLU_ACTIVATION_FUNCTION -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" #include // Smooth Rectified Linear Unit ( == ln(1+exp(x)) ) diff --git a/src/actf/SineActivationFunction.hpp b/include/ffnn/actf/SineActivationFunction.hpp similarity index 95% rename from src/actf/SineActivationFunction.hpp rename to include/ffnn/actf/SineActivationFunction.hpp index c2a491b..7f70719 100644 --- a/src/actf/SineActivationFunction.hpp +++ b/include/ffnn/actf/SineActivationFunction.hpp @@ -1,7 +1,7 @@ #ifndef SINE_ACTIVATION_FUNCTION #define SINE_ACTIVATION_FUNCTION -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" #include diff --git a/src/actf/TanSigmoidActivationFunction.hpp b/include/ffnn/actf/TanSigmoidActivationFunction.hpp similarity index 94% rename from src/actf/TanSigmoidActivationFunction.hpp rename to include/ffnn/actf/TanSigmoidActivationFunction.hpp index 2c78102..41d2173 100644 --- a/src/actf/TanSigmoidActivationFunction.hpp +++ b/include/ffnn/actf/TanSigmoidActivationFunction.hpp @@ -1,7 +1,7 @@ #ifndef TANSIGMOID_ACTIVATION_FUNCTION #define TANSIGMOID_ACTIVATION_FUNCTION -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" #include diff --git a/src/feeder/FeederInterface.hpp b/include/ffnn/feed/FeederInterface.hpp similarity index 99% rename from src/feeder/FeederInterface.hpp rename to include/ffnn/feed/FeederInterface.hpp index 0e98d5d..3d68c9c 100644 --- a/src/feeder/FeederInterface.hpp +++ b/include/ffnn/feed/FeederInterface.hpp @@ -1,7 +1,7 @@ #ifndef FEEDER_INTERFACE #define FEEDER_INTERFACE -#include "SerializableComponent.hpp" +#include "ffnn/serial/SerializableComponent.hpp" #include #include 
diff --git a/src/feeder/NNRay.hpp b/include/ffnn/feed/NNRay.hpp similarity index 92% rename from src/feeder/NNRay.hpp rename to include/ffnn/feed/NNRay.hpp index 8ec582f..3962d5a 100644 --- a/src/feeder/NNRay.hpp +++ b/include/ffnn/feed/NNRay.hpp @@ -1,8 +1,8 @@ #ifndef NN_RAY #define NN_RAY -#include "WeightedFeeder.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/feed/WeightedFeeder.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include diff --git a/src/feeder/SmartBetaGenerator.hpp b/include/ffnn/feed/SmartBetaGenerator.hpp similarity index 86% rename from src/feeder/SmartBetaGenerator.hpp rename to include/ffnn/feed/SmartBetaGenerator.hpp index 6fb2751..5c283b2 100644 --- a/src/feeder/SmartBetaGenerator.hpp +++ b/include/ffnn/feed/SmartBetaGenerator.hpp @@ -1,10 +1,10 @@ #ifndef SMART_BETA_GENERATOR #define SMART_BETA_GENERATOR -#include "FeedForwardNeuralNetwork.hpp" -#include "FedLayer.hpp" -#include "FeederInterface.hpp" -#include "NNRay.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/layer/FedLayer.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/feed/NNRay.hpp" namespace smart_beta { diff --git a/src/feeder/StaticFeeder.hpp b/include/ffnn/feed/StaticFeeder.hpp similarity index 94% rename from src/feeder/StaticFeeder.hpp rename to include/ffnn/feed/StaticFeeder.hpp index 9d809fb..4de935c 100644 --- a/src/feeder/StaticFeeder.hpp +++ b/include/ffnn/feed/StaticFeeder.hpp @@ -1,9 +1,9 @@ #ifndef STATIC_FEEDER #define STATIC_FEEDER -#include "FeederInterface.hpp" -#include "NetworkUnit.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include #include diff --git a/src/feeder/VariableFeeder.hpp b/include/ffnn/feed/VariableFeeder.hpp similarity index 90% rename from src/feeder/VariableFeeder.hpp rename to include/ffnn/feed/VariableFeeder.hpp index 64315e5..bb2c6f7 100644 --- a/src/feeder/VariableFeeder.hpp +++ 
b/include/ffnn/feed/VariableFeeder.hpp @@ -1,9 +1,9 @@ #ifndef VARIABLE_FEEDER #define VARIABLE_FEEDER -#include "FeederInterface.hpp" -#include "NetworkUnit.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include #include diff --git a/src/feeder/WeightedFeeder.hpp b/include/ffnn/feed/WeightedFeeder.hpp similarity index 91% rename from src/feeder/WeightedFeeder.hpp rename to include/ffnn/feed/WeightedFeeder.hpp index 9a46afa..6f56d3c 100644 --- a/src/feeder/WeightedFeeder.hpp +++ b/include/ffnn/feed/WeightedFeeder.hpp @@ -1,9 +1,9 @@ #ifndef WEIGHTED_FEEDER #define WEIGHTED_FEEDER -#include "VariableFeeder.hpp" -#include "NetworkUnit.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/feed/VariableFeeder.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include #include diff --git a/src/feature_maps/ActivationMapUnit.hpp b/include/ffnn/fmap/ActivationMapUnit.hpp similarity index 94% rename from src/feature_maps/ActivationMapUnit.hpp rename to include/ffnn/fmap/ActivationMapUnit.hpp index c89e26f..f47c943 100644 --- a/src/feature_maps/ActivationMapUnit.hpp +++ b/include/ffnn/fmap/ActivationMapUnit.hpp @@ -1,8 +1,8 @@ #ifndef ACTIVATION_MAP_UNIT #define ACTIVATION_MAP_UNIT -#include "FedActivationUnit.hpp" -#include "FeederInterface.hpp" +#include "ffnn/unit/FedActivationUnit.hpp" +#include "ffnn/feed/FeederInterface.hpp" #include #include diff --git a/src/feature_maps/EuclideanDistanceMap.hpp b/include/ffnn/fmap/EuclideanDistanceMap.hpp similarity index 95% rename from src/feature_maps/EuclideanDistanceMap.hpp rename to include/ffnn/fmap/EuclideanDistanceMap.hpp index 8d56525..3f7e6e7 100644 --- a/src/feature_maps/EuclideanDistanceMap.hpp +++ b/include/ffnn/fmap/EuclideanDistanceMap.hpp @@ -1,8 +1,8 @@ #ifndef EUCLIDEAN_DISTANCE_MAP #define EUCLIDEAN_DISTANCE_MAP -#include "MultiDimStaticMap.hpp" -#include "NetworkLayer.hpp" 
+#include "ffnn/fmap/MultiDimStaticMap.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include #include diff --git a/src/feature_maps/EuclideanDistanceMapUnit.hpp b/include/ffnn/fmap/EuclideanDistanceMapUnit.hpp similarity index 77% rename from src/feature_maps/EuclideanDistanceMapUnit.hpp rename to include/ffnn/fmap/EuclideanDistanceMapUnit.hpp index d0726bb..6df2d51 100644 --- a/src/feature_maps/EuclideanDistanceMapUnit.hpp +++ b/include/ffnn/fmap/EuclideanDistanceMapUnit.hpp @@ -1,8 +1,8 @@ #ifndef EUCLIDEAN_DISTANCE_MAP_UNIT #define EUCLIDEAN_DISTANCE_MAP_UNIT -#include "FeatureMapUnit.hpp" -#include "EuclideanDistanceMap.hpp" +#include "ffnn/fmap/FeatureMapUnit.hpp" +#include "ffnn/fmap/EuclideanDistanceMap.hpp" #include diff --git a/src/feature_maps/EuclideanPairDistanceMap.hpp b/include/ffnn/fmap/EuclideanPairDistanceMap.hpp similarity index 94% rename from src/feature_maps/EuclideanPairDistanceMap.hpp rename to include/ffnn/fmap/EuclideanPairDistanceMap.hpp index cb76f5b..42a7a6a 100644 --- a/src/feature_maps/EuclideanPairDistanceMap.hpp +++ b/include/ffnn/fmap/EuclideanPairDistanceMap.hpp @@ -1,8 +1,8 @@ #ifndef EUCLIDEAN_PAIR_DISTANCE_MAP #define EUCLIDEAN_PAIR_DISTANCE_MAP -#include "MultiDimStaticMap.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/fmap/MultiDimStaticMap.hpp" +#include "ffnn/layer/NetworkLayer.hpp" // takes coordinates on input side and calculates squared euclidean distance of a pair class EuclideanPairDistanceMap: public MultiDimStaticMap diff --git a/src/feature_maps/EuclideanPairDistanceMapUnit.hpp b/include/ffnn/fmap/EuclideanPairDistanceMapUnit.hpp similarity index 77% rename from src/feature_maps/EuclideanPairDistanceMapUnit.hpp rename to include/ffnn/fmap/EuclideanPairDistanceMapUnit.hpp index 704944b..04e3ec0 100644 --- a/src/feature_maps/EuclideanPairDistanceMapUnit.hpp +++ b/include/ffnn/fmap/EuclideanPairDistanceMapUnit.hpp @@ -1,8 +1,8 @@ #ifndef EUCLIDEAN_PAIR_DISTANCE_MAP_UNIT #define EUCLIDEAN_PAIR_DISTANCE_MAP_UNIT 
-#include "FeatureMapUnit.hpp" -#include "EuclideanPairDistanceMap.hpp" +#include "ffnn/fmap/FeatureMapUnit.hpp" +#include "ffnn/fmap/EuclideanPairDistanceMap.hpp" #include diff --git a/src/feature_maps/FeatureMapLayer.hpp b/include/ffnn/fmap/FeatureMapLayer.hpp similarity index 89% rename from src/feature_maps/FeatureMapLayer.hpp rename to include/ffnn/fmap/FeatureMapLayer.hpp index b6aaaeb..c8878bc 100644 --- a/src/feature_maps/FeatureMapLayer.hpp +++ b/include/ffnn/fmap/FeatureMapLayer.hpp @@ -1,15 +1,15 @@ #ifndef FEATURE_MAP_LAYER #define FEATURE_MAP_LAYER -#include "NetworkLayer.hpp" -#include "FedLayer.hpp" -#include "FedUnit.hpp" - -#include "PairSumMapUnit.hpp" -#include "PairDifferenceMapUnit.hpp" -#include "EuclideanDistanceMapUnit.hpp" -#include "EuclideanPairDistanceMapUnit.hpp" -#include "IdentityMapUnit.hpp" +#include "ffnn/layer/NetworkLayer.hpp" +#include "ffnn/layer/FedLayer.hpp" +#include "ffnn/unit/FedUnit.hpp" + +#include "ffnn/fmap/PairSumMapUnit.hpp" +#include "ffnn/fmap/PairDifferenceMapUnit.hpp" +#include "ffnn/fmap/EuclideanDistanceMapUnit.hpp" +#include "ffnn/fmap/EuclideanPairDistanceMapUnit.hpp" +#include "ffnn/fmap/IdentityMapUnit.hpp" class FeatureMapLayer: public FedLayer { diff --git a/src/feature_maps/FeatureMapUnit.hpp b/include/ffnn/fmap/FeatureMapUnit.hpp similarity index 94% rename from src/feature_maps/FeatureMapUnit.hpp rename to include/ffnn/fmap/FeatureMapUnit.hpp index c6bf108..0e1f18f 100644 --- a/src/feature_maps/FeatureMapUnit.hpp +++ b/include/ffnn/fmap/FeatureMapUnit.hpp @@ -1,8 +1,8 @@ #ifndef FEATURE_MAP_UNIT #define FEATURE_MAP_UNIT -#include "FedUnit.hpp" -#include "FeederInterface.hpp" +#include "ffnn/unit/FedUnit.hpp" +#include "ffnn/feed/FeederInterface.hpp" #include #include diff --git a/src/feature_maps/IdentityMap.hpp b/include/ffnn/fmap/IdentityMap.hpp similarity index 92% rename from src/feature_maps/IdentityMap.hpp rename to include/ffnn/fmap/IdentityMap.hpp index 3dd8314..b93cb18 100644 --- 
a/src/feature_maps/IdentityMap.hpp +++ b/include/ffnn/fmap/IdentityMap.hpp @@ -1,9 +1,9 @@ #ifndef IDENTITY_MAP #define IDENTITY_MAP -#include "OneDimStaticMap.hpp" -#include "NetworkUnit.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/fmap/OneDimStaticMap.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include #include // NULL diff --git a/src/feature_maps/IdentityMapUnit.hpp b/include/ffnn/fmap/IdentityMapUnit.hpp similarity index 76% rename from src/feature_maps/IdentityMapUnit.hpp rename to include/ffnn/fmap/IdentityMapUnit.hpp index e1ff473..af9bb68 100644 --- a/src/feature_maps/IdentityMapUnit.hpp +++ b/include/ffnn/fmap/IdentityMapUnit.hpp @@ -1,8 +1,8 @@ #ifndef IDENTITY_MAP_UNIT #define IDENTITY_MAP_UNIT -#include "FeatureMapUnit.hpp" -#include "IdentityMap.hpp" +#include "ffnn/fmap/FeatureMapUnit.hpp" +#include "ffnn/fmap/IdentityMap.hpp" #include diff --git a/src/feature_maps/MultiDimStaticMap.hpp b/include/ffnn/fmap/MultiDimStaticMap.hpp similarity index 90% rename from src/feature_maps/MultiDimStaticMap.hpp rename to include/ffnn/fmap/MultiDimStaticMap.hpp index 17128bb..4317ce3 100644 --- a/src/feature_maps/MultiDimStaticMap.hpp +++ b/include/ffnn/fmap/MultiDimStaticMap.hpp @@ -1,8 +1,8 @@ #ifndef MULTI_DIM_STATIC_MAP #define MULTI_DIM_STATIC_MAP -#include "StaticFeeder.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/feed/StaticFeeder.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include diff --git a/src/feature_maps/OneDimStaticMap.hpp b/include/ffnn/fmap/OneDimStaticMap.hpp similarity index 89% rename from src/feature_maps/OneDimStaticMap.hpp rename to include/ffnn/fmap/OneDimStaticMap.hpp index 4f00485..e12c397 100644 --- a/src/feature_maps/OneDimStaticMap.hpp +++ b/include/ffnn/fmap/OneDimStaticMap.hpp @@ -1,8 +1,8 @@ #ifndef ONE_DIM_STATIC_MAP #define ONE_DIM_STATIC_MAP -#include "StaticFeeder.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/feed/StaticFeeder.hpp" +#include "ffnn/layer/NetworkLayer.hpp" 
#include diff --git a/src/feature_maps/PairDifferenceMap.hpp b/include/ffnn/fmap/PairDifferenceMap.hpp similarity index 93% rename from src/feature_maps/PairDifferenceMap.hpp rename to include/ffnn/fmap/PairDifferenceMap.hpp index 128a93d..0ea7c67 100644 --- a/src/feature_maps/PairDifferenceMap.hpp +++ b/include/ffnn/fmap/PairDifferenceMap.hpp @@ -1,9 +1,9 @@ #ifndef PAIR_DIFFERENCE_MAP #define PAIR_DIFFERENCE_MAP -#include "OneDimStaticMap.hpp" -#include "NetworkUnit.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/fmap/OneDimStaticMap.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include #include // NULL diff --git a/src/feature_maps/PairDifferenceMapUnit.hpp b/include/ffnn/fmap/PairDifferenceMapUnit.hpp similarity index 77% rename from src/feature_maps/PairDifferenceMapUnit.hpp rename to include/ffnn/fmap/PairDifferenceMapUnit.hpp index e310e3e..def2bc9 100644 --- a/src/feature_maps/PairDifferenceMapUnit.hpp +++ b/include/ffnn/fmap/PairDifferenceMapUnit.hpp @@ -1,8 +1,8 @@ #ifndef PAIR_DIFFERENCE_MAP_UNIT #define PAIR_DIFFERENCE_MAP_UNIT -#include "FeatureMapUnit.hpp" -#include "PairDifferenceMap.hpp" +#include "ffnn/fmap/FeatureMapUnit.hpp" +#include "ffnn/fmap/PairDifferenceMap.hpp" #include diff --git a/src/feature_maps/PairSumMap.hpp b/include/ffnn/fmap/PairSumMap.hpp similarity index 92% rename from src/feature_maps/PairSumMap.hpp rename to include/ffnn/fmap/PairSumMap.hpp index ecf3925..c7f7de5 100644 --- a/src/feature_maps/PairSumMap.hpp +++ b/include/ffnn/fmap/PairSumMap.hpp @@ -1,9 +1,9 @@ #ifndef PAIR_SUM_MAP #define PAIR_SUM_MAP -#include "OneDimStaticMap.hpp" -#include "NetworkUnit.hpp" -#include "NetworkLayer.hpp" +#include "ffnn/fmap/OneDimStaticMap.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/layer/NetworkLayer.hpp" #include #include // NULL diff --git a/src/feature_maps/PairSumMapUnit.hpp b/include/ffnn/fmap/PairSumMapUnit.hpp similarity index 76% rename from src/feature_maps/PairSumMapUnit.hpp 
rename to include/ffnn/fmap/PairSumMapUnit.hpp index 9b47013..eab6798 100644 --- a/src/feature_maps/PairSumMapUnit.hpp +++ b/include/ffnn/fmap/PairSumMapUnit.hpp @@ -1,8 +1,8 @@ #ifndef PAIR_SUM_MAP_UNIT #define PAIR_SUM_MAP_UNIT -#include "FeatureMapUnit.hpp" -#include "PairSumMap.hpp" +#include "ffnn/fmap/FeatureMapUnit.hpp" +#include "ffnn/fmap/PairSumMap.hpp" #include diff --git a/src/io/PrintUtilities.hpp b/include/ffnn/io/PrintUtilities.hpp similarity index 94% rename from src/io/PrintUtilities.hpp rename to include/ffnn/io/PrintUtilities.hpp index 875beab..d6bb1dd 100644 --- a/src/io/PrintUtilities.hpp +++ b/include/ffnn/io/PrintUtilities.hpp @@ -2,7 +2,7 @@ #define PRINT_UTILITIES -#include "FeedForwardNeuralNetwork.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" #include diff --git a/src/layer/FedLayer.hpp b/include/ffnn/layer/FedLayer.hpp similarity index 88% rename from src/layer/FedLayer.hpp rename to include/ffnn/layer/FedLayer.hpp index 171ef10..0728104 100644 --- a/src/layer/FedLayer.hpp +++ b/include/ffnn/layer/FedLayer.hpp @@ -1,8 +1,8 @@ #ifndef FED_NETWORK_LAYER #define FED_NETWORK_LAYER -#include "FedUnit.hpp" -#include "NNRay.hpp" +#include "ffnn/unit/FedUnit.hpp" +#include "ffnn/feed/NNRay.hpp" #include @@ -42,6 +42,9 @@ class FedLayer: public NetworkLayer virtual FeederInterface * connectUnitOnTopOfLayer(NetworkLayer * nl, const int &i) = 0; // should create and return the feeder for the given unit void connectOnTopOfLayer(NetworkLayer * nl); void disconnect(); + + // --- Computation + void computeValues(); // overriding to add OMP pragma }; #endif diff --git a/src/layer/InputLayer.hpp b/include/ffnn/layer/InputLayer.hpp similarity index 91% rename from src/layer/InputLayer.hpp rename to include/ffnn/layer/InputLayer.hpp index c719822..6e96825 100644 --- a/src/layer/InputLayer.hpp +++ b/include/ffnn/layer/InputLayer.hpp @@ -1,8 +1,8 @@ #ifndef INPUT_LAYER #define INPUT_LAYER -#include "NetworkLayer.hpp" -#include "InputUnit.hpp" 
+#include "ffnn/layer/NetworkLayer.hpp" +#include "ffnn/unit/InputUnit.hpp" #include #include diff --git a/src/layer/NNLayer.hpp b/include/ffnn/layer/NNLayer.hpp similarity index 80% rename from src/layer/NNLayer.hpp rename to include/ffnn/layer/NNLayer.hpp index a871ac8..7740964 100644 --- a/src/layer/NNLayer.hpp +++ b/include/ffnn/layer/NNLayer.hpp @@ -1,13 +1,13 @@ #ifndef NN_LAYER #define NN_LAYER -#include "FedLayer.hpp" -#include "NetworkLayer.hpp" -#include "NNUnit.hpp" -#include "ActivationFunctionInterface.hpp" -#include "ActivationFunctionManager.hpp" -#include "FeederInterface.hpp" -#include "NNRay.hpp" +#include "ffnn/layer/FedLayer.hpp" +#include "ffnn/layer/NetworkLayer.hpp" +#include "ffnn/unit/NNUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/feed/NNRay.hpp" #include #include diff --git a/src/layer/NetworkLayer.hpp b/include/ffnn/layer/NetworkLayer.hpp similarity index 86% rename from src/layer/NetworkLayer.hpp rename to include/ffnn/layer/NetworkLayer.hpp index 136bb9b..63afd4f 100644 --- a/src/layer/NetworkLayer.hpp +++ b/include/ffnn/layer/NetworkLayer.hpp @@ -1,10 +1,10 @@ #ifndef NETWORK_LAYER #define NETWORK_LAYER -#include "SerializableComponent.hpp" -#include "StringCodeUtilities.hpp" -#include "NetworkUnit.hpp" -#include "OffsetUnit.hpp" +#include "ffnn/serial/SerializableComponent.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/unit/OffsetUnit.hpp" #include #include @@ -62,9 +62,9 @@ class NetworkLayer: public SerializableComponent // --- Values to compute - void addCrossSecondDerivativeSubstrate(const int &nx0, const int &nvp); - void addCrossFirstDerivativeSubstrate(const int &nx0, const int &nvp); - void addVariationalFirstDerivativeSubstrate(const int &nvp); + void addCrossSecondDerivativeSubstrate(const int &nx0); + void 
addCrossFirstDerivativeSubstrate(const int &nx0); + void addVariationalFirstDerivativeSubstrate(); void addSecondDerivativeSubstrate(const int &nx0); void addFirstDerivativeSubstrate(const int &nx0); diff --git a/src/layer/OutputNNLayer.hpp b/include/ffnn/layer/OutputNNLayer.hpp similarity index 84% rename from src/layer/OutputNNLayer.hpp rename to include/ffnn/layer/OutputNNLayer.hpp index 935486f..1fb69f4 100644 --- a/src/layer/OutputNNLayer.hpp +++ b/include/ffnn/layer/OutputNNLayer.hpp @@ -1,10 +1,10 @@ #ifndef OUTPUT_NN_LAYER #define OUTPUT_NN_LAYER -#include "NNLayer.hpp" -#include "OutputNNUnit.hpp" -#include "ActivationFunctionInterface.hpp" -#include "ActivationFunctionManager.hpp" +#include "ffnn/layer/NNLayer.hpp" +#include "ffnn/unit/OutputNNUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" #include #include diff --git a/src/network/FeedForwardNeuralNetwork.hpp b/include/ffnn/net/FeedForwardNeuralNetwork.hpp similarity index 96% rename from src/network/FeedForwardNeuralNetwork.hpp rename to include/ffnn/net/FeedForwardNeuralNetwork.hpp index c6ac107..5cf90b7 100644 --- a/src/network/FeedForwardNeuralNetwork.hpp +++ b/include/ffnn/net/FeedForwardNeuralNetwork.hpp @@ -1,14 +1,14 @@ #ifndef FEED_FORWARD_NEURAL_NETWORK #define FEED_FORWARD_NEURAL_NETWORK -#include "ActivationFunctionInterface.hpp" -#include "NetworkLayer.hpp" -#include "InputLayer.hpp" -#include "FedLayer.hpp" -#include "NNLayer.hpp" -#include "OutputNNLayer.hpp" -#include "FeatureMapLayer.hpp" -#include "NetworkUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/layer/NetworkLayer.hpp" +#include "ffnn/layer/InputLayer.hpp" +#include "ffnn/layer/FedLayer.hpp" +#include "ffnn/layer/NNLayer.hpp" +#include "ffnn/layer/OutputNNLayer.hpp" +#include "ffnn/fmap/FeatureMapLayer.hpp" +#include "ffnn/unit/NetworkUnit.hpp" #include #include diff --git a/src/serialize/SerializableComponent.hpp 
b/include/ffnn/serial/SerializableComponent.hpp similarity index 93% rename from src/serialize/SerializableComponent.hpp rename to include/ffnn/serial/SerializableComponent.hpp index 8c84b61..cd7fb86 100644 --- a/src/serialize/SerializableComponent.hpp +++ b/include/ffnn/serial/SerializableComponent.hpp @@ -1,7 +1,7 @@ #ifndef SERIALIZABLE_COMPONENT #define SERIALIZABLE_COMPONENT -#include "StringCodeUtilities.hpp" // for functions on stringCodes, look there for documentation about stringCodes +#include "ffnn/serial/StringCodeUtilities.hpp" // for functions on stringCodes, look there for documentation about stringCodes #include diff --git a/src/serialize/StringCodeUtilities.hpp b/include/ffnn/serial/StringCodeUtilities.hpp similarity index 100% rename from src/serialize/StringCodeUtilities.hpp rename to include/ffnn/serial/StringCodeUtilities.hpp diff --git a/src/trainer/NNTrainer.hpp b/include/ffnn/train/NNTrainer.hpp similarity index 94% rename from src/trainer/NNTrainer.hpp rename to include/ffnn/train/NNTrainer.hpp index 25ca0b2..36cf4d3 100644 --- a/src/trainer/NNTrainer.hpp +++ b/include/ffnn/train/NNTrainer.hpp @@ -1,10 +1,10 @@ #ifndef NN_TRAINER #define NN_TRAINER -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" -#include "NNTrainingData.hpp" -#include "NNTrainingConfig.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" +#include "ffnn/train/NNTrainingData.hpp" +#include "ffnn/train/NNTrainingConfig.hpp" #include #include // NULL diff --git a/src/trainer/NNTrainerGSL.hpp b/include/ffnn/train/NNTrainerGSL.hpp similarity index 95% rename from src/trainer/NNTrainerGSL.hpp rename to include/ffnn/train/NNTrainerGSL.hpp index 7769254..60b380b 100644 --- a/src/trainer/NNTrainerGSL.hpp +++ b/include/ffnn/train/NNTrainerGSL.hpp @@ -1,10 +1,10 @@ #ifndef NN_TRAINER_GSL #define NN_TRAINER_GSL -#include "NNTrainer.hpp" -#include "NNTrainingData.hpp" -#include "NNTrainingConfig.hpp" -#include 
"FeedForwardNeuralNetwork.hpp" +#include "ffnn/train/NNTrainer.hpp" +#include "ffnn/train/NNTrainingData.hpp" +#include "ffnn/train/NNTrainingConfig.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" #include diff --git a/src/trainer/NNTrainingConfig.hpp b/include/ffnn/train/NNTrainingConfig.hpp similarity index 88% rename from src/trainer/NNTrainingConfig.hpp rename to include/ffnn/train/NNTrainingConfig.hpp index 0d7064e..9bf7f84 100644 --- a/src/trainer/NNTrainingConfig.hpp +++ b/include/ffnn/train/NNTrainingConfig.hpp @@ -1,7 +1,7 @@ #ifndef NN_TRAINING_CONFIG #define NN_TRAINING_CONFIG -#include "FeedForwardNeuralNetwork.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" // holds the required configuration parameters for the trainer struct NNTrainingConfig { diff --git a/src/trainer/NNTrainingData.hpp b/include/ffnn/train/NNTrainingData.hpp similarity index 100% rename from src/trainer/NNTrainingData.hpp rename to include/ffnn/train/NNTrainingData.hpp diff --git a/src/unit/ActivationUnit.hpp b/include/ffnn/unit/ActivationUnit.hpp similarity index 93% rename from src/unit/ActivationUnit.hpp rename to include/ffnn/unit/ActivationUnit.hpp index 13f1e6d..493f28e 100644 --- a/src/unit/ActivationUnit.hpp +++ b/include/ffnn/unit/ActivationUnit.hpp @@ -1,9 +1,9 @@ #ifndef ACTIVATION_UNIT #define ACTIVATION_UNIT -#include "NetworkUnit.hpp" -#include "ActivationFunctionInterface.hpp" -#include "ActivationFunctionManager.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" #include #include diff --git a/src/unit/FedActivationUnit.hpp b/include/ffnn/unit/FedActivationUnit.hpp similarity index 79% rename from src/unit/FedActivationUnit.hpp rename to include/ffnn/unit/FedActivationUnit.hpp index 03631de..3a3dc87 100644 --- a/src/unit/FedActivationUnit.hpp +++ b/include/ffnn/unit/FedActivationUnit.hpp @@ -1,11 +1,11 @@ #ifndef FED_ACTIVATION_UNIT #define 
FED_ACTIVATION_UNIT -#include "FedUnit.hpp" -#include "ActivationUnit.hpp" -#include "ActivationFunctionInterface.hpp" -#include "ActivationFunctionManager.hpp" -#include "FeederInterface.hpp" +#include "ffnn/unit/FedUnit.hpp" +#include "ffnn/unit/ActivationUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/feed/FeederInterface.hpp" #include @@ -19,8 +19,8 @@ class FedActivationUnit: public FedUnit, public ActivationUnit virtual ~FedActivationUnit(){} // return the output mean value (mu) and standard deviation (sigma) - virtual double getOutputMu(){return _actf->getOutputMu(FedUnit::getOutputMu());} - virtual double getOutputSigma(){return _actf->getOutputSigma(FedUnit::getOutputSigma());} + virtual double getOutputMu(){return _actf->getOutputMu(FedUnit::getOutputMu(), FedUnit::getOutputSigma());} + virtual double getOutputSigma(){return _actf->getOutputSigma(FedUnit::getOutputMu(), FedUnit::getOutputSigma());} // string code getters / setter virtual std::string getMemberTreeCode(){return composeCodes(FedUnit::getMemberTreeCode(), ActivationUnit::getMemberTreeCode());} // append actf treeCode diff --git a/src/unit/FedUnit.hpp b/include/ffnn/unit/FedUnit.hpp similarity index 95% rename from src/unit/FedUnit.hpp rename to include/ffnn/unit/FedUnit.hpp index 8a4d38a..1fc6fd9 100644 --- a/src/unit/FedUnit.hpp +++ b/include/ffnn/unit/FedUnit.hpp @@ -1,8 +1,8 @@ #ifndef FED_UNIT #define FED_UNIT -#include "NetworkUnit.hpp" -#include "FeederInterface.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/feed/FeederInterface.hpp" #include #include // for NULL diff --git a/src/unit/InputUnit.hpp b/include/ffnn/unit/InputUnit.hpp similarity index 96% rename from src/unit/InputUnit.hpp rename to include/ffnn/unit/InputUnit.hpp index 252e845..fa0a4a2 100644 --- a/src/unit/InputUnit.hpp +++ b/include/ffnn/unit/InputUnit.hpp @@ -1,7 +1,7 @@ #ifndef INPUT_UNIT #define INPUT_UNIT -#include 
"ShifterScalerUnit.hpp" +#include "ffnn/unit/ShifterScalerUnit.hpp" #include // Input Unit diff --git a/src/unit/NNUnit.hpp b/include/ffnn/unit/NNUnit.hpp similarity index 83% rename from src/unit/NNUnit.hpp rename to include/ffnn/unit/NNUnit.hpp index f393c63..8e1678b 100644 --- a/src/unit/NNUnit.hpp +++ b/include/ffnn/unit/NNUnit.hpp @@ -1,11 +1,11 @@ #ifndef NN_UNIT #define NN_UNIT -#include "FedActivationUnit.hpp" -#include "ActivationFunctionInterface.hpp" -#include "ActivationFunctionManager.hpp" -#include "FeederInterface.hpp" -#include "NNRay.hpp" +#include "ffnn/unit/FedActivationUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/feed/NNRay.hpp" #include #include diff --git a/src/unit/NetworkUnit.hpp b/include/ffnn/unit/NetworkUnit.hpp similarity index 97% rename from src/unit/NetworkUnit.hpp rename to include/ffnn/unit/NetworkUnit.hpp index fc79d15..a81974a 100644 --- a/src/unit/NetworkUnit.hpp +++ b/include/ffnn/unit/NetworkUnit.hpp @@ -1,8 +1,8 @@ #ifndef NETWORK_UNIT #define NETWORK_UNIT -#include "SerializableComponent.hpp" -#include "FeederInterface.hpp" +#include "ffnn/serial/SerializableComponent.hpp" +#include "ffnn/feed/FeederInterface.hpp" #include #include // for NULL diff --git a/src/unit/OffsetUnit.hpp b/include/ffnn/unit/OffsetUnit.hpp similarity index 95% rename from src/unit/OffsetUnit.hpp rename to include/ffnn/unit/OffsetUnit.hpp index 80c3a98..58c6c8f 100644 --- a/src/unit/OffsetUnit.hpp +++ b/include/ffnn/unit/OffsetUnit.hpp @@ -1,7 +1,7 @@ #ifndef OFFSET_UNIT #define OFFSET_UNIT -#include "NetworkUnit.hpp" +#include "ffnn/unit/NetworkUnit.hpp" // Offset Unit class OffsetUnit: public NetworkUnit diff --git a/src/unit/OutputNNUnit.hpp b/include/ffnn/unit/OutputNNUnit.hpp similarity index 86% rename from src/unit/OutputNNUnit.hpp rename to include/ffnn/unit/OutputNNUnit.hpp index ef8db67..ae1d7f1 100644 --- 
a/src/unit/OutputNNUnit.hpp +++ b/include/ffnn/unit/OutputNNUnit.hpp @@ -1,9 +1,9 @@ #ifndef OUTPUT_NN_UNIT #define OUTPUT_NN_UNIT -#include "ShifterScalerNNUnit.hpp" -#include "ActivationFunctionManager.hpp" -#include "NNRay.hpp" +#include "ffnn/unit/ShifterScalerNNUnit.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/feed/NNRay.hpp" // Output Neural Network Unit class OutputNNUnit: public ShifterScalerNNUnit diff --git a/src/unit/ShifterScalerNNUnit.hpp b/include/ffnn/unit/ShifterScalerNNUnit.hpp similarity index 87% rename from src/unit/ShifterScalerNNUnit.hpp rename to include/ffnn/unit/ShifterScalerNNUnit.hpp index bcb53bb..28e3204 100644 --- a/src/unit/ShifterScalerNNUnit.hpp +++ b/include/ffnn/unit/ShifterScalerNNUnit.hpp @@ -1,11 +1,11 @@ #ifndef SHIFTER_SCALER_NN_UNIT #define SHIFTER_SCALER_NN_UNIT -#include "ShifterScalerUnit.hpp" -#include "NNUnit.hpp" -#include "ActivationFunctionInterface.hpp" -#include "ActivationFunctionManager.hpp" -#include "NNRay.hpp" +#include "ffnn/unit/ShifterScalerUnit.hpp" +#include "ffnn/unit/NNUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/feed/NNRay.hpp" #include // for NULL #include diff --git a/src/unit/ShifterScalerUnit.hpp b/include/ffnn/unit/ShifterScalerUnit.hpp similarity index 96% rename from src/unit/ShifterScalerUnit.hpp rename to include/ffnn/unit/ShifterScalerUnit.hpp index 7a3f3f5..d8cd9f1 100644 --- a/src/unit/ShifterScalerUnit.hpp +++ b/include/ffnn/unit/ShifterScalerUnit.hpp @@ -1,8 +1,8 @@ #ifndef SHIFTER_SCALER_UNIT #define SHIFTER_SCALER_UNIT -#include "NetworkUnit.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include diff --git a/lib/Makefile.am b/lib/Makefile.am deleted file mode 100644 index af8e3e3..0000000 --- a/lib/Makefile.am +++ /dev/null @@ -1,40 +0,0 @@ -if !DEBUG -if !COVERAGE - AM_CXXFLAGS += $(OPTFLAGS) 
-endif -endif - -if COVERAGE - AM_CPPFLAGS += $(COVERAGE_CPPFLAGS) - AM_CXXFLAGS += $(COVERAGE_CXXFLAGS) - AM_LDFLAGS += $(COVERAGE_LDFLAGS) -endif - -if PROFILING - AM_CXXFLAGS += $(PROF_CFLAGS) - AM_LDFLAGS += $(PROF_LFLAGS) -endif - -if OPENMP - AM_CXXFLAGS += $(OMP_CFLAGS) - AM_LDFLAGS += $(OMP_LFLAGS) -endif - -lib_LTLIBRARIES = libffnn.la -include headers.am -include sources.am - -if COVERAGE -clean-local: - rm -f ../src/*/*.gcda - rm -f ../src/*/*.gcno - rm -f ../src/*/*.gcov -endif - -source-lists: - echo "pkginclude_HEADERS = \\" > headers.am - find ../src/ -name *.hpp | tr '\n' ' ' >> headers.am - echo "libffnn_la_SOURCES = \\" > sources.am - find ../src/ -name *.cpp | tr '\n' ' ' >> sources.am - -.PHONY: source-lists diff --git a/lib/README.md b/lib/README.md deleted file mode 100644 index e2a2dba..0000000 --- a/lib/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# lib - -In this folder the library will be built before installing. Just run `make` here or `make lib` from root folder. - -Also this is a default library search path for the project, -so one way to provide external libraries outside of standard paths is by dropping or linking them in here. 
diff --git a/lib/headers.am b/lib/headers.am deleted file mode 100644 index 1ad34e9..0000000 --- a/lib/headers.am +++ /dev/null @@ -1,2 +0,0 @@ -pkginclude_HEADERS = \ -../src/io/PrintUtilities.hpp ../src/unit/InputUnit.hpp ../src/unit/ShifterScalerUnit.hpp ../src/unit/ActivationUnit.hpp ../src/unit/FedActivationUnit.hpp ../src/unit/NetworkUnit.hpp ../src/unit/NNUnit.hpp ../src/unit/FedUnit.hpp ../src/unit/ShifterScalerNNUnit.hpp ../src/unit/OutputNNUnit.hpp ../src/unit/OffsetUnit.hpp ../src/layer/NNLayer.hpp ../src/layer/NetworkLayer.hpp ../src/layer/OutputNNLayer.hpp ../src/layer/InputLayer.hpp ../src/layer/FedLayer.hpp ../src/network/FeedForwardNeuralNetwork.hpp ../src/feeder/FeederInterface.hpp ../src/feeder/WeightedFeeder.hpp ../src/feeder/VariableFeeder.hpp ../src/feeder/SmartBetaGenerator.hpp ../src/feeder/NNRay.hpp ../src/feeder/StaticFeeder.hpp ../src/trainer/NNTrainerGSL.hpp ../src/trainer/NNTrainingData.hpp ../src/trainer/NNTrainingConfig.hpp ../src/trainer/NNTrainer.hpp ../src/serialize/SerializableComponent.hpp ../src/serialize/StringCodeUtilities.hpp ../src/feature_maps/PairSumMapUnit.hpp ../src/feature_maps/FeatureMapLayer.hpp ../src/feature_maps/PairSumMap.hpp ../src/feature_maps/EuclideanDistanceMapUnit.hpp ../src/feature_maps/PairDifferenceMapUnit.hpp ../src/feature_maps/EuclideanDistanceMap.hpp ../src/feature_maps/EuclideanPairDistanceMap.hpp ../src/feature_maps/IdentityMapUnit.hpp ../src/feature_maps/PairDifferenceMap.hpp ../src/feature_maps/FeatureMapUnit.hpp ../src/feature_maps/EuclideanPairDistanceMapUnit.hpp ../src/feature_maps/MultiDimStaticMap.hpp ../src/feature_maps/IdentityMap.hpp ../src/feature_maps/OneDimStaticMap.hpp ../src/feature_maps/ActivationMapUnit.hpp ../src/actf/ActivationFunctionManager.hpp ../src/actf/SRLUActivationFunction.hpp ../src/actf/TanSigmoidActivationFunction.hpp ../src/actf/SineActivationFunction.hpp ../src/actf/LogisticActivationFunction.hpp ../src/actf/ActivationFunctionInterface.hpp 
../src/actf/SELUActivationFunction.hpp ../src/actf/IdentityActivationFunction.hpp ../src/actf/GaussianActivationFunction.hpp ../src/actf/ReLUActivationFunction.hpp \ No newline at end of file diff --git a/lib/sources.am b/lib/sources.am deleted file mode 100644 index 02610bf..0000000 --- a/lib/sources.am +++ /dev/null @@ -1,2 +0,0 @@ -libffnn_la_SOURCES = \ -../src/io/PrintUtilities.cpp ../src/unit/ActivationUnit.cpp ../src/unit/InputUnit.cpp ../src/unit/OutputNNUnit.cpp ../src/unit/NetworkUnit.cpp ../src/unit/FedUnit.cpp ../src/layer/NNLayer.cpp ../src/layer/InputLayer.cpp ../src/layer/OutputNNLayer.cpp ../src/layer/FedLayer.cpp ../src/layer/NetworkLayer.cpp ../src/network/FeedForwardNeuralNetwork.cpp ../src/feeder/NNRay.cpp ../src/feeder/WeightedFeeder.cpp ../src/feeder/FeederInterface.cpp ../src/feeder/SmartBetaGenerator.cpp ../src/feeder/VariableFeeder.cpp ../src/trainer/NNTrainerGSL.cpp ../src/trainer/NNTrainer.cpp ../src/serialize/StringCodeUtilities.cpp ../src/feature_maps/EuclideanPairDistanceMap.cpp ../src/feature_maps/IdentityMap.cpp ../src/feature_maps/FeatureMapLayer.cpp ../src/feature_maps/PairSumMap.cpp ../src/feature_maps/PairDifferenceMap.cpp ../src/feature_maps/MultiDimStaticMap.cpp ../src/feature_maps/OneDimStaticMap.cpp ../src/feature_maps/EuclideanDistanceMap.cpp ../src/actf/LogisticActivationFunction.cpp ../src/actf/ReLUActivationFunction.cpp ../src/actf/SELUActivationFunction.cpp ../src/actf/GaussianActivationFunction.cpp ../src/actf/ActivationFunctionManager.cpp ../src/actf/TanSigmoidActivationFunction.cpp ../src/actf/SineActivationFunction.cpp ../src/actf/SRLUActivationFunction.cpp \ No newline at end of file diff --git a/m4/README.md b/m4/README.md deleted file mode 100644 index cd7aa05..0000000 --- a/m4/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# m4 - -In this folder we collect special m4 macros for autoconf. 
diff --git a/m4/ax_ac_append_to_file.m4 b/m4/ax_ac_append_to_file.m4 deleted file mode 100644 index 242b3d5..0000000 --- a/m4/ax_ac_append_to_file.m4 +++ /dev/null @@ -1,32 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_ac_append_to_file.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_AC_APPEND_TO_FILE([FILE],[DATA]) -# -# DESCRIPTION -# -# Appends the specified data to the specified Autoconf is run. If you want -# to append to a file when configure is run use AX_APPEND_TO_FILE instead. -# -# LICENSE -# -# Copyright (c) 2009 Allan Caffee -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 10 - -AC_DEFUN([AX_AC_APPEND_TO_FILE],[ -AC_REQUIRE([AX_FILE_ESCAPES]) -m4_esyscmd( -AX_FILE_ESCAPES -[ -printf "%s" "$2" >> "$1" -]) -]) diff --git a/m4/ax_ac_print_to_file.m4 b/m4/ax_ac_print_to_file.m4 deleted file mode 100644 index 642dfc1..0000000 --- a/m4/ax_ac_print_to_file.m4 +++ /dev/null @@ -1,32 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_ac_print_to_file.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_AC_PRINT_TO_FILE([FILE],[DATA]) -# -# DESCRIPTION -# -# Writes the specified data to the specified file when Autoconf is run. If -# you want to print to a file when configure is run use AX_PRINT_TO_FILE -# instead. -# -# LICENSE -# -# Copyright (c) 2009 Allan Caffee -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. 
This file is offered as-is, without any -# warranty. - -#serial 10 - -AC_DEFUN([AX_AC_PRINT_TO_FILE],[ -m4_esyscmd( -AC_REQUIRE([AX_FILE_ESCAPES]) -[ -printf "%s" "$2" > "$1" -]) -]) diff --git a/m4/ax_add_am_macro_static.m4 b/m4/ax_add_am_macro_static.m4 deleted file mode 100644 index 6442d24..0000000 --- a/m4/ax_add_am_macro_static.m4 +++ /dev/null @@ -1,28 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_add_am_macro_static.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_ADD_AM_MACRO_STATIC([RULE]) -# -# DESCRIPTION -# -# Adds the specified rule to $AMINCLUDE. -# -# LICENSE -# -# Copyright (c) 2009 Tom Howard -# Copyright (c) 2009 Allan Caffee -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 8 - -AC_DEFUN([AX_ADD_AM_MACRO_STATIC],[ - AC_REQUIRE([AX_AM_MACROS_STATIC]) - AX_AC_APPEND_TO_FILE(AMINCLUDE_STATIC,[$1]) -]) diff --git a/m4/ax_add_am_macros_static.m4 b/m4/ax_add_am_macros_static.m4 deleted file mode 100644 index f4cee8c..0000000 --- a/m4/ax_add_am_macros_static.m4 +++ /dev/null @@ -1,38 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_am_macros_static.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_AM_MACROS_STATIC -# -# DESCRIPTION -# -# Adds support for macros that create Automake rules. You must manually -# add the following line -# -# include $(top_srcdir)/aminclude_static.am -# -# to your Makefile.am files. 
-# -# LICENSE -# -# Copyright (c) 2009 Tom Howard -# Copyright (c) 2009 Allan Caffee -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 11 - -AC_DEFUN([AMINCLUDE_STATIC],[aminclude_static.am]) - -AC_DEFUN([AX_AM_MACROS_STATIC], -[ -AX_AC_PRINT_TO_FILE(AMINCLUDE_STATIC,[ -# ]AMINCLUDE_STATIC[ generated automatically by Autoconf -# from AX_AM_MACROS_STATIC on ]m4_esyscmd([LC_ALL=C date])[ -]) -]) diff --git a/m4/ax_check_enable_debug.m4 b/m4/ax_check_enable_debug.m4 deleted file mode 100644 index 5c4a262..0000000 --- a/m4/ax_check_enable_debug.m4 +++ /dev/null @@ -1,125 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_check_enable_debug.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_CHECK_ENABLE_DEBUG([enable by default=yes/info/profile/no], [ENABLE DEBUG VARIABLES ...], [DISABLE DEBUG VARIABLES NDEBUG ...], [IS-RELEASE]) -# -# DESCRIPTION -# -# Check for the presence of an --enable-debug option to configure, with -# the specified default value used when the option is not present. Return -# the value in the variable $ax_enable_debug. -# -# Specifying 'yes' adds '-g -O0' to the compilation flags for all -# languages. Specifying 'info' adds '-g' to the compilation flags. -# Specifying 'profile' adds '-g -pg' to the compilation flags and '-pg' to -# the linking flags. Otherwise, nothing is added. -# -# Define the variables listed in the second argument if debug is enabled, -# defaulting to no variables. Defines the variables listed in the third -# argument if debug is disabled, defaulting to NDEBUG. All lists of -# variables should be space-separated. 
-# -# If debug is not enabled, ensure AC_PROG_* will not add debugging flags. -# Should be invoked prior to any AC_PROG_* compiler checks. -# -# IS-RELEASE can be used to change the default to 'no' when making a -# release. Set IS-RELEASE to 'yes' or 'no' as appropriate. By default, it -# uses the value of $ax_is_release, so if you are using the AX_IS_RELEASE -# macro, there is no need to pass this parameter. -# -# AX_IS_RELEASE([git-directory]) -# AX_CHECK_ENABLE_DEBUG() -# -# LICENSE -# -# Copyright (c) 2011 Rhys Ulerich -# Copyright (c) 2014, 2015 Philip Withnall -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. - -#serial 8 - -AC_DEFUN([AX_CHECK_ENABLE_DEBUG],[ - AC_BEFORE([$0],[AC_PROG_CC])dnl - AC_BEFORE([$0],[AC_PROG_CXX])dnl - AC_BEFORE([$0],[AC_PROG_F77])dnl - AC_BEFORE([$0],[AC_PROG_FC])dnl - - AC_MSG_CHECKING(whether to enable debugging) - - ax_enable_debug_default=m4_tolower(m4_normalize(ifelse([$1],,[no],[$1]))) - ax_enable_debug_is_release=m4_tolower(m4_normalize(ifelse([$4],, - [$ax_is_release], - [$4]))) - - # If this is a release, override the default. 
- AS_IF([test "$ax_enable_debug_is_release" = "yes"], - [ax_enable_debug_default="no"]) - - m4_define(ax_enable_debug_vars,[m4_normalize(ifelse([$2],,,[$2]))]) - m4_define(ax_disable_debug_vars,[m4_normalize(ifelse([$3],,[NDEBUG],[$3]))]) - - AC_ARG_ENABLE(debug, - [AS_HELP_STRING([--enable-debug=]@<:@yes/info/profile/no@:>@,[compile with debugging])], - [],enable_debug=$ax_enable_debug_default) - - # empty mean debug yes - AS_IF([test "x$enable_debug" = "x"], - [enable_debug="yes"]) - - # case of debug - AS_CASE([$enable_debug], - [yes],[ - AC_MSG_RESULT(yes) - CFLAGS="${CFLAGS} -g -O0" - CXXFLAGS="${CXXFLAGS} -g -O0" - FFLAGS="${FFLAGS} -g -O0" - FCFLAGS="${FCFLAGS} -g -O0" - OBJCFLAGS="${OBJCFLAGS} -g -O0" - ], - [info],[ - AC_MSG_RESULT(info) - CFLAGS="${CFLAGS} -g" - CXXFLAGS="${CXXFLAGS} -g" - FFLAGS="${FFLAGS} -g" - FCFLAGS="${FCFLAGS} -g" - OBJCFLAGS="${OBJCFLAGS} -g" - ], - [profile],[ - AC_MSG_RESULT(profile) - CFLAGS="${CFLAGS} -g -pg" - CXXFLAGS="${CXXFLAGS} -g -pg" - FFLAGS="${FFLAGS} -g -pg" - FCFLAGS="${FCFLAGS} -g -pg" - OBJCFLAGS="${OBJCFLAGS} -g -pg" - LDFLAGS="${LDFLAGS} -pg" - ], - [ - AC_MSG_RESULT(no) - dnl Ensure AC_PROG_CC/CXX/F77/FC/OBJC will not enable debug flags - dnl by setting any unset environment flag variables - AS_IF([test "x${CFLAGS+set}" != "xset"], - [CFLAGS=""]) - AS_IF([test "x${CXXFLAGS+set}" != "xset"], - [CXXFLAGS=""]) - AS_IF([test "x${FFLAGS+set}" != "xset"], - [FFLAGS=""]) - AS_IF([test "x${FCFLAGS+set}" != "xset"], - [FCFLAGS=""]) - AS_IF([test "x${OBJCFLAGS+set}" != "xset"], - [OBJCFLAGS=""]) - ]) - - dnl Define various variables if debugging is disabled. - dnl assert.h is a NOP if NDEBUG is defined, so define it by default. 
- AS_IF([test "x$enable_debug" = "xyes"], - [m4_map_args_w(ax_enable_debug_vars, [AC_DEFINE(], [,,[Define if debugging is enabled])])], - [m4_map_args_w(ax_disable_debug_vars, [AC_DEFINE(], [,,[Define if debugging is disabled])])]) - ax_enable_debug=$enable_debug -]) - diff --git a/m4/ax_check_gnu_make.m4 b/m4/ax_check_gnu_make.m4 deleted file mode 100644 index 6811043..0000000 --- a/m4/ax_check_gnu_make.m4 +++ /dev/null @@ -1,95 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_check_gnu_make.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_CHECK_GNU_MAKE([run-if-true],[run-if-false]) -# -# DESCRIPTION -# -# This macro searches for a GNU version of make. If a match is found: -# -# * The makefile variable `ifGNUmake' is set to the empty string, otherwise -# it is set to "#". This is useful for including a special features in a -# Makefile, which cannot be handled by other versions of make. -# * The makefile variable `ifnGNUmake' is set to #, otherwise -# it is set to the empty string. This is useful for including a special -# features in a Makefile, which can be handled -# by other versions of make or to specify else like clause. -# * The variable `_cv_gnu_make_command` is set to the command to invoke -# GNU make if it exists, the empty string otherwise. -# * The variable `ax_cv_gnu_make_command` is set to the command to invoke -# GNU make by copying `_cv_gnu_make_command`, otherwise it is unset. -# * If GNU Make is found, its version is extracted from the output of -# `make --version` as the last field of a record of space-separated -# columns and saved into the variable `ax_check_gnu_make_version`. -# * Additionally if GNU Make is found, run shell code run-if-true -# else run shell code run-if-false. 
-# -# Here is an example of its use: -# -# Makefile.in might contain: -# -# # A failsafe way of putting a dependency rule into a makefile -# $(DEPEND): -# $(CC) -MM $(srcdir)/*.c > $(DEPEND) -# -# @ifGNUmake@ ifeq ($(DEPEND),$(wildcard $(DEPEND))) -# @ifGNUmake@ include $(DEPEND) -# @ifGNUmake@ else -# fallback code -# @ifGNUmake@ endif -# -# Then configure.in would normally contain: -# -# AX_CHECK_GNU_MAKE() -# AC_OUTPUT(Makefile) -# -# Then perhaps to cause gnu make to override any other make, we could do -# something like this (note that GNU make always looks for GNUmakefile -# first): -# -# if ! test x$_cv_gnu_make_command = x ; then -# mv Makefile GNUmakefile -# echo .DEFAULT: > Makefile ; -# echo \ $_cv_gnu_make_command \$@ >> Makefile; -# fi -# -# Then, if any (well almost any) other make is called, and GNU make also -# exists, then the other make wraps the GNU make. -# -# LICENSE -# -# Copyright (c) 2008 John Darrington -# Copyright (c) 2015 Enrico M. Crisostomo -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. 
- -#serial 11 - -AC_DEFUN([AX_CHECK_GNU_MAKE],dnl - [AC_PROG_AWK - AC_CACHE_CHECK([for GNU make],[_cv_gnu_make_command],[dnl - _cv_gnu_make_command="" ; -dnl Search all the common names for GNU make - for a in "$MAKE" make gmake gnumake ; do - if test -z "$a" ; then continue ; fi ; - if "$a" --version 2> /dev/null | grep GNU 2>&1 > /dev/null ; then - _cv_gnu_make_command=$a ; - AX_CHECK_GNU_MAKE_HEADLINE=$("$a" --version 2> /dev/null | grep "GNU Make") - ax_check_gnu_make_version=$(echo ${AX_CHECK_GNU_MAKE_HEADLINE} | ${AWK} -F " " '{ print $(NF); }') - break ; - fi - done ;]) -dnl If there was a GNU version, then set @ifGNUmake@ to the empty string, '#' otherwise - AS_VAR_IF([_cv_gnu_make_command], [""], [AS_VAR_SET([ifGNUmake], ["#"])], [AS_VAR_SET([ifGNUmake], [""])]) - AS_VAR_IF([_cv_gnu_make_command], [""], [AS_VAR_SET([ifnGNUmake], [""])], [AS_VAR_SET([ifGNUmake], ["#"])]) - AS_VAR_IF([_cv_gnu_make_command], [""], [AS_UNSET(ax_cv_gnu_make_command)], [AS_VAR_SET([ax_cv_gnu_make_command], [${_cv_gnu_make_command}])]) - AS_VAR_IF([_cv_gnu_make_command], [""],[$2],[$1]) - AC_SUBST([ifGNUmake]) - AC_SUBST([ifnGNUmake]) -]) diff --git a/m4/ax_coverage.m4 b/m4/ax_coverage.m4 deleted file mode 100644 index 0a6c93c..0000000 --- a/m4/ax_coverage.m4 +++ /dev/null @@ -1,166 +0,0 @@ -# =============================================================================== -# Adapted from https://www.gnu.org/software/autoconf-archive/ax_code_coverage.html -# ================================================================================ -# -# SYNOPSIS -# -# AX_COVERAGE() -# -# DESCRIPTION -# -# Defines COVERAGE_CPPFLAGS, COVERAGE_CFLAGS, -# COVERAGE_CXXFLAGS and COVERAGE_LDFLAGS which should be included -# in the CPPFLAGS, CFLAGS CXXFLAGS and LIBS/LIBADD variables of every -# build target (program or library) which should be built with code -# coverage support. Also add rules using AX_ADD_AM_MACRO_STATIC; and -# $enable_coverage which can be used in subsequent configure output. 
-# COVERAGE_ENABLED is defined and substituted, and corresponds to the -# value of the --enable-coverage option, which defaults to being -# disabled. -# -# Test also for gcov program and create GCOV variable that could be -# substituted. -# -# Note that all optimization flags in CFLAGS must be disabled when code -# coverage is enabled. -# -# Usage example: -# -# configure.ac: -# -# AX_COVERAGE -# -# Makefile.am: -# -# include $(top_srcdir)/aminclude_static.am -# -# my_program_LIBS = ... $(COVERAGE_LDFLAGS) ... -# my_program_CPPFLAGS = ... $(COVERAGE_CPPFLAGS) ... -# my_program_CFLAGS = ... $(COVERAGE_CFLAGS) ... -# my_program_CXXFLAGS = ... $(COVERAGE_CXXFLAGS) ... -# -# clean-local: coverage-clean -# dist-clean-local: coverage-dist-clean -# -# This code was derived from Makefile.decl in GLib, originally licensed -# under LGPLv2.1+. -# -# LICENSE -# -# Copyright (c) 2012, 2016 Philip Withnall -# Copyright (c) 2012 Xan Lopez -# Copyright (c) 2012 Christian Persch -# Copyright (c) 2012 Paolo Borelli -# Copyright (c) 2012 Dan Winship -# Copyright (c) 2015,2018 Bastien ROUCARIES -# -# Modified in 2018 by Jan Kessler. -# -# This library is free software; you can redistribute it and/or modify it -# under the terms of the GNU Lesser General Public License as published by -# the Free Software Foundation; either version 2.1 of the License, or (at -# your option) any later version. -# -# This library is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser -# General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with this program. If not, see . - -#serial 31 - -m4_define(_AX_COVERAGE_RULES,[ -AX_ADD_AM_MACRO_STATIC([ - -# Code coverage -# -if COVERAGE_ENABLED - ifeq (\$(abs_builddir), \$(abs_top_builddir)) - -coverage-clean: - -find . 
\\( -name \"*.gcda\" -o -name \"*.gcno\" -o -name \"*.gcov\" \\) -delete - -coverage-dist-clean: -A][M_DISTCHECK_CONFIGURE_FLAGS = \$(A][M_DISTCHECK_CONFIGURE_FLAGS) --disable-coverage - - else # ifneq (\$(abs_builddir), \$(abs_top_builddir)) - -check-coverage: check - -coverage-clean: - -coverage-dist-clean: - - endif # ifeq (\$(abs_builddir), \$(abs_top_builddir)) - -else #! COVERAGE_ENABLED - -# Use recursive makes in order to ignore errors during check -check-coverage: - @echo \"Need to reconfigure with --enable-coverage\" - -coverage-clean: - -coverage-dist-clean: - -endif #COVERAGE_ENABLED - -.PHONY: check-coverage coverage-dist-clean coverage-clean - -]) -]) - -AC_DEFUN([_AX_COVERAGE_ENABLED],[ - AX_CHECK_GNU_MAKE([],[AC_MSG_ERROR([not using GNU make that is needed for coverage])]) - AC_REQUIRE([AX_ADD_AM_MACRO_STATIC]) - # check for gcov - AC_CHECK_TOOL([GCOV], - [$_AX_COVERAGE_GCOV_PROG_WITH], - [:]) - AS_IF([test "X$GCOV" = "X:"], - [AC_MSG_ERROR([gcov is needed to do coverage])]) - AC_SUBST([GCOV]) - - dnl Build the code coverage flags - dnl Define COVERAGE_LDFLAGS for backwards compatibility - COVERAGE_CPPFLAGS="" - COVERAGE_CFLAGS="-O0 -g -fprofile-arcs -ftest-coverage" - COVERAGE_CXXFLAGS="-O0 -g -fprofile-arcs -ftest-coverage" - COVERAGE_LDFLAGS="--coverage" - - dnl Check if g++ is being used - - AC_SUBST([COVERAGE_CPPFLAGS]) - AC_SUBST([COVERAGE_CFLAGS]) - AC_SUBST([COVERAGE_CXXFLAGS]) - AC_SUBST([COVERAGE_LDFLAGS]) -]) - -AC_DEFUN([AX_COVERAGE],[ - dnl Check for --enable-coverage - - # allow to override gcov location - AC_ARG_WITH([gcov], - [AS_HELP_STRING([--with-gcov[=GCOV]], [use given GCOV for coverage (GCOV=gcov).])], - [_AX_COVERAGE_GCOV_PROG_WITH=$with_gcov], - [_AX_COVERAGE_GCOV_PROG_WITH=gcov]) - - AC_MSG_CHECKING([whether to build with code coverage support]) - AC_ARG_ENABLE([coverage], - AS_HELP_STRING([--enable-coverage], - [Whether to enable code coverage support]),, - enable_coverage=no) - - AM_CONDITIONAL([COVERAGE_ENABLED], [test 
"x$enable_coverage" = xyes]) - AC_SUBST([COVERAGE_ENABLED], [$enable_coverage]) - AC_MSG_RESULT($enable_coverage) - - AS_IF([ test "x$enable_coverage" = xyes ], [ - _AX_COVERAGE_ENABLED - ]) - - _AX_COVERAGE_RULES -]) - diff --git a/m4/ax_cxx_compile_stdcxx.m4 b/m4/ax_cxx_compile_stdcxx.m4 deleted file mode 100644 index e5eb94f..0000000 --- a/m4/ax_cxx_compile_stdcxx.m4 +++ /dev/null @@ -1,949 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional]) -# -# DESCRIPTION -# -# Check for baseline language coverage in the compiler for the specified -# version of the C++ standard. If necessary, add switches to CXX and -# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard) -# or '14' (for the C++14 standard). -# -# The second argument, if specified, indicates whether you insist on an -# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g. -# -std=c++11). If neither is specified, you get whatever works, with -# preference for an extended mode. -# -# The third argument, if specified 'mandatory' or if left unspecified, -# indicates that baseline support for the specified C++ standard is -# required and that the macro should error out if no mode with that -# support is found. If specified 'optional', then configuration proceeds -# regardless, after defining HAVE_CXX${VERSION} if and only if a -# supporting mode is found. 
-# -# LICENSE -# -# Copyright (c) 2008 Benjamin Kosnik -# Copyright (c) 2012 Zack Weinberg -# Copyright (c) 2013 Roy Stogner -# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov -# Copyright (c) 2015 Paul Norman -# Copyright (c) 2015 Moritz Klammler -# Copyright (c) 2016, 2018 Krzesimir Nowak -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 10 - -dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro -dnl (serial version number 13). - -AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl - m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"], - [$1], [14], [ax_cxx_compile_alternatives="14 1y"], - [$1], [17], [ax_cxx_compile_alternatives="17 1z"], - [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl - m4_if([$2], [], [], - [$2], [ext], [], - [$2], [noext], [], - [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl - m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true], - [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true], - [$3], [optional], [ax_cxx_compile_cxx$1_required=false], - [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])]) - AC_LANG_PUSH([C++])dnl - ac_success=no - - m4_if([$2], [noext], [], [dnl - if test x$ac_success = xno; then - for alternative in ${ax_cxx_compile_alternatives}; do - switch="-std=gnu++${alternative}" - cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) - AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, - $cachevar, - [ac_save_CXX="$CXX" - CXX="$CXX $switch" - AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], - [eval $cachevar=yes], - [eval $cachevar=no]) - CXX="$ac_save_CXX"]) - if eval test x\$$cachevar = xyes; then - CXX="$CXX $switch" - if test -n "$CXXCPP" ; then - CXXCPP="$CXXCPP $switch" - fi - 
ac_success=yes - break - fi - done - fi]) - - m4_if([$2], [ext], [], [dnl - if test x$ac_success = xno; then - dnl HP's aCC needs +std=c++11 according to: - dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf - dnl Cray's crayCC needs "-h std=c++11" - for alternative in ${ax_cxx_compile_alternatives}; do - for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do - cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch]) - AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch, - $cachevar, - [ac_save_CXX="$CXX" - CXX="$CXX $switch" - AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])], - [eval $cachevar=yes], - [eval $cachevar=no]) - CXX="$ac_save_CXX"]) - if eval test x\$$cachevar = xyes; then - CXX="$CXX $switch" - if test -n "$CXXCPP" ; then - CXXCPP="$CXXCPP $switch" - fi - ac_success=yes - break - fi - done - if test x$ac_success = xyes; then - break - fi - done - fi]) - AC_LANG_POP([C++]) - if test x$ax_cxx_compile_cxx$1_required = xtrue; then - if test x$ac_success = xno; then - AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.]) - fi - fi - if test x$ac_success = xno; then - HAVE_CXX$1=0 - AC_MSG_NOTICE([No compiler with C++$1 support was found]) - else - HAVE_CXX$1=1 - AC_DEFINE(HAVE_CXX$1,1, - [define if the compiler supports basic C++$1 syntax]) - fi - AC_SUBST(HAVE_CXX$1) -]) - - -dnl Test body for checking C++11 support - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11], - _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 -) - - -dnl Test body for checking C++14 support - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14], - _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 - _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 -) - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17], - _AX_CXX_COMPILE_STDCXX_testbody_new_in_11 - _AX_CXX_COMPILE_STDCXX_testbody_new_in_14 - _AX_CXX_COMPILE_STDCXX_testbody_new_in_17 -) - -dnl Tests for new features in 
C++11 - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[ - -// If the compiler admits that it is not ready for C++11, why torture it? -// Hopefully, this will speed up the test. - -#ifndef __cplusplus - -#error "This is not a C++ compiler" - -#elif __cplusplus < 201103L - -#error "This is not a C++11 compiler" - -#else - -namespace cxx11 -{ - - namespace test_static_assert - { - - template - struct check - { - static_assert(sizeof(int) <= sizeof(T), "not big enough"); - }; - - } - - namespace test_final_override - { - - struct Base - { - virtual void f() {} - }; - - struct Derived : public Base - { - virtual void f() override {} - }; - - } - - namespace test_double_right_angle_brackets - { - - template < typename T > - struct check {}; - - typedef check single_type; - typedef check> double_type; - typedef check>> triple_type; - typedef check>>> quadruple_type; - - } - - namespace test_decltype - { - - int - f() - { - int a = 1; - decltype(a) b = 2; - return a + b; - } - - } - - namespace test_type_deduction - { - - template < typename T1, typename T2 > - struct is_same - { - static const bool value = false; - }; - - template < typename T > - struct is_same - { - static const bool value = true; - }; - - template < typename T1, typename T2 > - auto - add(T1 a1, T2 a2) -> decltype(a1 + a2) - { - return a1 + a2; - } - - int - test(const int c, volatile int v) - { - static_assert(is_same::value == true, ""); - static_assert(is_same::value == false, ""); - static_assert(is_same::value == false, ""); - auto ac = c; - auto av = v; - auto sumi = ac + av + 'x'; - auto sumf = ac + av + 1.0; - static_assert(is_same::value == true, ""); - static_assert(is_same::value == true, ""); - static_assert(is_same::value == true, ""); - static_assert(is_same::value == false, ""); - static_assert(is_same::value == true, ""); - return (sumf > 0.0) ? 
sumi : add(c, v); - } - - } - - namespace test_noexcept - { - - int f() { return 0; } - int g() noexcept { return 0; } - - static_assert(noexcept(f()) == false, ""); - static_assert(noexcept(g()) == true, ""); - - } - - namespace test_constexpr - { - - template < typename CharT > - unsigned long constexpr - strlen_c_r(const CharT *const s, const unsigned long acc) noexcept - { - return *s ? strlen_c_r(s + 1, acc + 1) : acc; - } - - template < typename CharT > - unsigned long constexpr - strlen_c(const CharT *const s) noexcept - { - return strlen_c_r(s, 0UL); - } - - static_assert(strlen_c("") == 0UL, ""); - static_assert(strlen_c("1") == 1UL, ""); - static_assert(strlen_c("example") == 7UL, ""); - static_assert(strlen_c("another\0example") == 7UL, ""); - - } - - namespace test_rvalue_references - { - - template < int N > - struct answer - { - static constexpr int value = N; - }; - - answer<1> f(int&) { return answer<1>(); } - answer<2> f(const int&) { return answer<2>(); } - answer<3> f(int&&) { return answer<3>(); } - - void - test() - { - int i = 0; - const int c = 0; - static_assert(decltype(f(i))::value == 1, ""); - static_assert(decltype(f(c))::value == 2, ""); - static_assert(decltype(f(0))::value == 3, ""); - } - - } - - namespace test_uniform_initialization - { - - struct test - { - static const int zero {}; - static const int one {1}; - }; - - static_assert(test::zero == 0, ""); - static_assert(test::one == 1, ""); - - } - - namespace test_lambdas - { - - void - test1() - { - auto lambda1 = [](){}; - auto lambda2 = lambda1; - lambda1(); - lambda2(); - } - - int - test2() - { - auto a = [](int i, int j){ return i + j; }(1, 2); - auto b = []() -> int { return '0'; }(); - auto c = [=](){ return a + b; }(); - auto d = [&](){ return c; }(); - auto e = [a, &b](int x) mutable { - const auto identity = [](int y){ return y; }; - for (auto i = 0; i < a; ++i) - a += b--; - return x + identity(a + b); - }(0); - return a + b + c + d + e; - } - - int - test3() - { - 
const auto nullary = [](){ return 0; }; - const auto unary = [](int x){ return x; }; - using nullary_t = decltype(nullary); - using unary_t = decltype(unary); - const auto higher1st = [](nullary_t f){ return f(); }; - const auto higher2nd = [unary](nullary_t f1){ - return [unary, f1](unary_t f2){ return f2(unary(f1())); }; - }; - return higher1st(nullary) + higher2nd(nullary)(unary); - } - - } - - namespace test_variadic_templates - { - - template - struct sum; - - template - struct sum - { - static constexpr auto value = N0 + sum::value; - }; - - template <> - struct sum<> - { - static constexpr auto value = 0; - }; - - static_assert(sum<>::value == 0, ""); - static_assert(sum<1>::value == 1, ""); - static_assert(sum<23>::value == 23, ""); - static_assert(sum<1, 2>::value == 3, ""); - static_assert(sum<5, 5, 11>::value == 21, ""); - static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, ""); - - } - - // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae - // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function - // because of this. - namespace test_template_alias_sfinae - { - - struct foo {}; - - template - using member = typename T::member_type; - - template - void func(...) {} - - template - void func(member*) {} - - void test(); - - void test() { func(0); } - - } - -} // namespace cxx11 - -#endif // __cplusplus >= 201103L - -]]) - - -dnl Tests for new features in C++14 - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[ - -// If the compiler admits that it is not ready for C++14, why torture it? -// Hopefully, this will speed up the test. - -#ifndef __cplusplus - -#error "This is not a C++ compiler" - -#elif __cplusplus < 201402L - -#error "This is not a C++14 compiler" - -#else - -namespace cxx14 -{ - - namespace test_polymorphic_lambdas - { - - int - test() - { - const auto lambda = [](auto&&... args){ - const auto istiny = [](auto x){ - return (sizeof(x) == 1UL) ? 
1 : 0; - }; - const int aretiny[] = { istiny(args)... }; - return aretiny[0]; - }; - return lambda(1, 1L, 1.0f, '1'); - } - - } - - namespace test_binary_literals - { - - constexpr auto ivii = 0b0000000000101010; - static_assert(ivii == 42, "wrong value"); - - } - - namespace test_generalized_constexpr - { - - template < typename CharT > - constexpr unsigned long - strlen_c(const CharT *const s) noexcept - { - auto length = 0UL; - for (auto p = s; *p; ++p) - ++length; - return length; - } - - static_assert(strlen_c("") == 0UL, ""); - static_assert(strlen_c("x") == 1UL, ""); - static_assert(strlen_c("test") == 4UL, ""); - static_assert(strlen_c("another\0test") == 7UL, ""); - - } - - namespace test_lambda_init_capture - { - - int - test() - { - auto x = 0; - const auto lambda1 = [a = x](int b){ return a + b; }; - const auto lambda2 = [a = lambda1(x)](){ return a; }; - return lambda2(); - } - - } - - namespace test_digit_separators - { - - constexpr auto ten_million = 100'000'000; - static_assert(ten_million == 100000000, ""); - - } - - namespace test_return_type_deduction - { - - auto f(int& x) { return x; } - decltype(auto) g(int& x) { return x; } - - template < typename T1, typename T2 > - struct is_same - { - static constexpr auto value = false; - }; - - template < typename T > - struct is_same - { - static constexpr auto value = true; - }; - - int - test() - { - auto x = 0; - static_assert(is_same::value, ""); - static_assert(is_same::value, ""); - return x; - } - - } - -} // namespace cxx14 - -#endif // __cplusplus >= 201402L - -]]) - - -dnl Tests for new features in C++17 - -m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[ - -// If the compiler admits that it is not ready for C++17, why torture it? -// Hopefully, this will speed up the test. 
- -#ifndef __cplusplus - -#error "This is not a C++ compiler" - -#elif __cplusplus < 201703L - -#error "This is not a C++17 compiler" - -#else - -#include -#include -#include - -namespace cxx17 -{ - - namespace test_constexpr_lambdas - { - - constexpr int foo = [](){return 42;}(); - - } - - namespace test::nested_namespace::definitions - { - - } - - namespace test_fold_expression - { - - template - int multiply(Args... args) - { - return (args * ... * 1); - } - - template - bool all(Args... args) - { - return (args && ...); - } - - } - - namespace test_extended_static_assert - { - - static_assert (true); - - } - - namespace test_auto_brace_init_list - { - - auto foo = {5}; - auto bar {5}; - - static_assert(std::is_same, decltype(foo)>::value); - static_assert(std::is_same::value); - } - - namespace test_typename_in_template_template_parameter - { - - template typename X> struct D; - - } - - namespace test_fallthrough_nodiscard_maybe_unused_attributes - { - - int f1() - { - return 42; - } - - [[nodiscard]] int f2() - { - [[maybe_unused]] auto unused = f1(); - - switch (f1()) - { - case 17: - f1(); - [[fallthrough]]; - case 42: - f1(); - } - return f1(); - } - - } - - namespace test_extended_aggregate_initialization - { - - struct base1 - { - int b1, b2 = 42; - }; - - struct base2 - { - base2() { - b3 = 42; - } - int b3; - }; - - struct derived : base1, base2 - { - int d; - }; - - derived d1 {{1, 2}, {}, 4}; // full initialization - derived d2 {{}, {}, 4}; // value-initialized bases - - } - - namespace test_general_range_based_for_loop - { - - struct iter - { - int i; - - int& operator* () - { - return i; - } - - const int& operator* () const - { - return i; - } - - iter& operator++() - { - ++i; - return *this; - } - }; - - struct sentinel - { - int i; - }; - - bool operator== (const iter& i, const sentinel& s) - { - return i.i == s.i; - } - - bool operator!= (const iter& i, const sentinel& s) - { - return !(i == s); - } - - struct range - { - iter begin() const - { 
- return {0}; - } - - sentinel end() const - { - return {5}; - } - }; - - void f() - { - range r {}; - - for (auto i : r) - { - [[maybe_unused]] auto v = i; - } - } - - } - - namespace test_lambda_capture_asterisk_this_by_value - { - - struct t - { - int i; - int foo() - { - return [*this]() - { - return i; - }(); - } - }; - - } - - namespace test_enum_class_construction - { - - enum class byte : unsigned char - {}; - - byte foo {42}; - - } - - namespace test_constexpr_if - { - - template - int f () - { - if constexpr(cond) - { - return 13; - } - else - { - return 42; - } - } - - } - - namespace test_selection_statement_with_initializer - { - - int f() - { - return 13; - } - - int f2() - { - if (auto i = f(); i > 0) - { - return 3; - } - - switch (auto i = f(); i + 4) - { - case 17: - return 2; - - default: - return 1; - } - } - - } - - namespace test_template_argument_deduction_for_class_templates - { - - template - struct pair - { - pair (T1 p1, T2 p2) - : m1 {p1}, - m2 {p2} - {} - - T1 m1; - T2 m2; - }; - - void f() - { - [[maybe_unused]] auto p = pair{13, 42u}; - } - - } - - namespace test_non_type_auto_template_parameters - { - - template - struct B - {}; - - B<5> b1; - B<'a'> b2; - - } - - namespace test_structured_bindings - { - - int arr[2] = { 1, 2 }; - std::pair pr = { 1, 2 }; - - auto f1() -> int(&)[2] - { - return arr; - } - - auto f2() -> std::pair& - { - return pr; - } - - struct S - { - int x1 : 2; - volatile double y1; - }; - - S f3() - { - return {}; - } - - auto [ x1, y1 ] = f1(); - auto& [ xr1, yr1 ] = f1(); - auto [ x2, y2 ] = f2(); - auto& [ xr2, yr2 ] = f2(); - const auto [ x3, y3 ] = f3(); - - } - - namespace test_exception_spec_type_system - { - - struct Good {}; - struct Bad {}; - - void g1() noexcept; - void g2(); - - template - Bad - f(T*, T*); - - template - Good - f(T1*, T2*); - - static_assert (std::is_same_v); - - } - - namespace test_inline_variables - { - - template void f(T) - {} - - template inline T g(T) - { - return T{}; - } - - 
template<> inline void f<>(int) - {} - - template<> int g<>(int) - { - return 5; - } - - } - -} // namespace cxx17 - -#endif // __cplusplus < 201703L - -]]) - diff --git a/m4/ax_file_escapes.m4 b/m4/ax_file_escapes.m4 deleted file mode 100644 index a86fdc3..0000000 --- a/m4/ax_file_escapes.m4 +++ /dev/null @@ -1,30 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_file_escapes.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_FILE_ESCAPES -# -# DESCRIPTION -# -# Writes the specified data to the specified file. -# -# LICENSE -# -# Copyright (c) 2008 Tom Howard -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 8 - -AC_DEFUN([AX_FILE_ESCAPES],[ -AX_DOLLAR="\$" -AX_SRB="\\135" -AX_SLB="\\133" -AX_BS="\\\\" -AX_DQ="\"" -]) diff --git a/m4/ax_valgrind_check.m4 b/m4/ax_valgrind_check.m4 deleted file mode 100644 index 7033798..0000000 --- a/m4/ax_valgrind_check.m4 +++ /dev/null @@ -1,239 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_valgrind_check.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_VALGRIND_DFLT(memcheck|helgrind|drd|sgcheck, on|off) -# AX_VALGRIND_CHECK() -# -# DESCRIPTION -# -# AX_VALGRIND_CHECK checks whether Valgrind is present and, if so, allows -# running `make check` under a variety of Valgrind tools to check for -# memory and threading errors. -# -# Defines VALGRIND_CHECK_RULES which should be substituted in your -# Makefile; and $enable_valgrind which can be used in subsequent configure -# output. 
VALGRIND_ENABLED is defined and substituted, and corresponds to -# the value of the --enable-valgrind option, which defaults to being -# enabled if Valgrind is installed and disabled otherwise. Individual -# Valgrind tools can be disabled via --disable-valgrind-, the -# default is configurable via the AX_VALGRIND_DFLT command or is to use -# all commands not disabled via AX_VALGRIND_DFLT. All AX_VALGRIND_DFLT -# calls must be made before the call to AX_VALGRIND_CHECK. -# -# If unit tests are written using a shell script and automake's -# LOG_COMPILER system, the $(VALGRIND) variable can be used within the -# shell scripts to enable Valgrind, as described here: -# -# https://www.gnu.org/software/gnulib/manual/html_node/Running-self_002dtests-under-valgrind.html -# -# Usage example: -# -# configure.ac: -# -# AX_VALGRIND_DFLT([sgcheck], [off]) -# AX_VALGRIND_CHECK -# -# in each Makefile.am with tests: -# -# @VALGRIND_CHECK_RULES@ -# VALGRIND_SUPPRESSIONS_FILES = my-project.supp -# EXTRA_DIST = my-project.supp -# -# This results in a "check-valgrind" rule being added. Running `make -# check-valgrind` in that directory will recursively run the module's test -# suite (`make check`) once for each of the available Valgrind tools (out -# of memcheck, helgrind and drd) while the sgcheck will be skipped unless -# enabled again on the commandline with --enable-valgrind-sgcheck. The -# results for each check will be output to test-suite-$toolname.log. The -# target will succeed if there are zero errors and fail otherwise. -# -# Alternatively, a "check-valgrind-$TOOL" rule will be added, for $TOOL in -# memcheck, helgrind, drd and sgcheck. These are useful because often only -# some of those tools can be ran cleanly on a codebase. -# -# The macro supports running with and without libtool. 
-# -# LICENSE -# -# Copyright (c) 2014, 2015, 2016 Philip Withnall -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 17 - -dnl Configured tools -m4_define([valgrind_tool_list], [[memcheck], [helgrind], [drd], [sgcheck]]) -m4_set_add_all([valgrind_exp_tool_set], [sgcheck]) -m4_foreach([vgtool], [valgrind_tool_list], - [m4_define([en_dflt_valgrind_]vgtool, [on])]) - -AC_DEFUN([AX_VALGRIND_DFLT],[ - m4_define([en_dflt_valgrind_$1], [$2]) -])dnl - -AM_EXTRA_RECURSIVE_TARGETS([check-valgrind]) -m4_foreach([vgtool], [valgrind_tool_list], - [AM_EXTRA_RECURSIVE_TARGETS([check-valgrind-]vgtool)]) - -AC_DEFUN([AX_VALGRIND_CHECK],[ - dnl Check for --enable-valgrind - AC_ARG_ENABLE([valgrind], - [AS_HELP_STRING([--enable-valgrind], [Whether to enable Valgrind on the unit tests])], - [enable_valgrind=$enableval],[enable_valgrind=]) - - AS_IF([test "$enable_valgrind" != "no"],[ - # Check for Valgrind. - AC_CHECK_PROG([VALGRIND],[valgrind],[valgrind]) - AS_IF([test "$VALGRIND" = ""],[ - AS_IF([test "$enable_valgrind" = "yes"],[ - AC_MSG_ERROR([Could not find valgrind; either install it or reconfigure with --disable-valgrind]) - ],[ - enable_valgrind=no - ]) - ],[ - enable_valgrind=yes - ]) - ]) - - AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"]) - AC_SUBST([VALGRIND_ENABLED],[$enable_valgrind]) - - # Check for Valgrind tools we care about. 
- [valgrind_enabled_tools=] - m4_foreach([vgtool],[valgrind_tool_list],[ - AC_ARG_ENABLE([valgrind-]vgtool, - m4_if(m4_defn([en_dflt_valgrind_]vgtool),[off],dnl -[AS_HELP_STRING([--enable-valgrind-]vgtool, [Whether to use ]vgtool[ during the Valgrind tests])],dnl -[AS_HELP_STRING([--disable-valgrind-]vgtool, [Whether to skip ]vgtool[ during the Valgrind tests])]), - [enable_valgrind_]vgtool[=$enableval], - [enable_valgrind_]vgtool[=]) - AS_IF([test "$enable_valgrind" = "no"],[ - enable_valgrind_]vgtool[=no], - [test "$enable_valgrind_]vgtool[" ]dnl -m4_if(m4_defn([en_dflt_valgrind_]vgtool), [off], [= "yes"], [!= "no"]),[ - AC_CACHE_CHECK([for Valgrind tool ]vgtool, - [ax_cv_valgrind_tool_]vgtool,[ - ax_cv_valgrind_tool_]vgtool[=no - m4_set_contains([valgrind_exp_tool_set],vgtool, - [m4_define([vgtoolx],[exp-]vgtool)], - [m4_define([vgtoolx],vgtool)]) - AS_IF([`$VALGRIND --tool=]vgtoolx[ --help >/dev/null 2>&1`],[ - ax_cv_valgrind_tool_]vgtool[=yes - ]) - ]) - AS_IF([test "$ax_cv_valgrind_tool_]vgtool[" = "no"],[ - AS_IF([test "$enable_valgrind_]vgtool[" = "yes"],[ - AC_MSG_ERROR([Valgrind does not support ]vgtool[; reconfigure with --disable-valgrind-]vgtool) - ],[ - enable_valgrind_]vgtool[=no - ]) - ],[ - enable_valgrind_]vgtool[=yes - ]) - ]) - AS_IF([test "$enable_valgrind_]vgtool[" = "yes"],[ - valgrind_enabled_tools="$valgrind_enabled_tools ]m4_bpatsubst(vgtool,[^exp-])[" - ]) - AC_SUBST([ENABLE_VALGRIND_]vgtool,[$enable_valgrind_]vgtool) - ]) - AC_SUBST([valgrind_tools],["]m4_join([ ], valgrind_tool_list)["]) - AC_SUBST([valgrind_enabled_tools],[$valgrind_enabled_tools]) - -[VALGRIND_CHECK_RULES=' -# Valgrind check -# -# Optional: -# - VALGRIND_SUPPRESSIONS_FILES: Space-separated list of Valgrind suppressions -# files to load. (Default: empty) -# - VALGRIND_FLAGS: General flags to pass to all Valgrind tools. -# (Default: --num-callers=30) -# - VALGRIND_$toolname_FLAGS: Flags to pass to Valgrind $toolname (one of: -# memcheck, helgrind, drd, sgcheck). 
(Default: various) - -# Optional variables -VALGRIND_SUPPRESSIONS ?= $(addprefix --suppressions=,$(VALGRIND_SUPPRESSIONS_FILES)) -VALGRIND_FLAGS ?= --num-callers=30 -VALGRIND_memcheck_FLAGS ?= --leak-check=full --show-reachable=no -VALGRIND_helgrind_FLAGS ?= --history-level=approx -VALGRIND_drd_FLAGS ?= -VALGRIND_sgcheck_FLAGS ?= - -# Internal use -valgrind_log_files = $(addprefix test-suite-,$(addsuffix .log,$(valgrind_tools))) - -valgrind_memcheck_flags = --tool=memcheck $(VALGRIND_memcheck_FLAGS) -valgrind_helgrind_flags = --tool=helgrind $(VALGRIND_helgrind_FLAGS) -valgrind_drd_flags = --tool=drd $(VALGRIND_drd_FLAGS) -valgrind_sgcheck_flags = --tool=exp-sgcheck $(VALGRIND_sgcheck_FLAGS) - -valgrind_quiet = $(valgrind_quiet_$(V)) -valgrind_quiet_ = $(valgrind_quiet_$(AM_DEFAULT_VERBOSITY)) -valgrind_quiet_0 = --quiet -valgrind_v_use = $(valgrind_v_use_$(V)) -valgrind_v_use_ = $(valgrind_v_use_$(AM_DEFAULT_VERBOSITY)) -valgrind_v_use_0 = @echo " USE " $(patsubst check-valgrind-%-am,%,$''@):; - -# Support running with and without libtool. 
-ifneq ($(LIBTOOL),) -valgrind_lt = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=execute -else -valgrind_lt = -endif - -# Use recursive makes in order to ignore errors during check -check-valgrind-am: -ifeq ($(VALGRIND_ENABLED),yes) - $(A''M_V_at)$(MAKE) $(AM_MAKEFLAGS) -k \ - $(foreach tool, $(valgrind_enabled_tools), check-valgrind-$(tool)) -else - @echo "Need to reconfigure with --enable-valgrind" -endif - -# Valgrind running -VALGRIND_TESTS_ENVIRONMENT = \ - $(TESTS_ENVIRONMENT) \ - env VALGRIND=$(VALGRIND) \ - G_SLICE=always-malloc,debug-blocks \ - G_DEBUG=fatal-warnings,fatal-criticals,gc-friendly - -VALGRIND_LOG_COMPILER = \ - $(valgrind_lt) \ - $(VALGRIND) $(VALGRIND_SUPPRESSIONS) --error-exitcode=1 $(VALGRIND_FLAGS) - -define valgrind_tool_rule -check-valgrind-$(1)-am: -ifeq ($$(VALGRIND_ENABLED)-$$(ENABLE_VALGRIND_$(1)),yes-yes) -ifneq ($$(TESTS),) - $$(valgrind_v_use)$$(MAKE) check-TESTS \ - TESTS_ENVIRONMENT="$$(VALGRIND_TESTS_ENVIRONMENT)" \ - LOG_COMPILER="$$(VALGRIND_LOG_COMPILER)" \ - LOG_FLAGS="$$(valgrind_$(1)_flags)" \ - TEST_SUITE_LOG=test-suite-$(1).log -endif -else ifeq ($$(VALGRIND_ENABLED),yes) - @echo "Need to reconfigure with --enable-valgrind-$(1)" -else - @echo "Need to reconfigure with --enable-valgrind" -endif -endef - -$(foreach tool,$(valgrind_tools),$(eval $(call valgrind_tool_rule,$(tool)))) - -A''M_DISTCHECK_CONFIGURE_FLAGS ?= -A''M_DISTCHECK_CONFIGURE_FLAGS += --disable-valgrind - -MOSTLYCLEANFILES ?= -MOSTLYCLEANFILES += $(valgrind_log_files) - -.PHONY: check-valgrind $(add-prefix check-valgrind-,$(valgrind_tools)) -'] - - AC_SUBST([VALGRIND_CHECK_RULES]) - m4_ifdef([_AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE([VALGRIND_CHECK_RULES])]) -]) diff --git a/script/README.md b/script/README.md deleted file mode 100644 index f307ee4..0000000 --- a/script/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# script - -In this folder we collect various useful scripts. - -NOTE: All scripts are expecting to be called from project root directory. 
diff --git a/script/config_template.sh b/script/config_template.sh deleted file mode 100755 index 643e8cb..0000000 --- a/script/config_template.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# Config script for custom library and header paths or C++ compiler choice -# -# If you want to edit this template script/config_template.sh, -# copy it over to something like config.sh and edit the gitignored copy. -# - -# C++ compiler -export CXX="g++" - -# C++ flags -export CXXFLAGS="" - -# GSL Library -GSL_L="-L/usr/local/lib" -GSL_I="-I/usr/local/include" - - -# ! DO NOT EDIT THE FOLLOWING ! - -# linker flags -export LDFLAGS="${GSL_L}" - -# pre-processor flags -export CPPFLAGS="${GSL_I}" diff --git a/script/generate_ac_config_files.sh b/script/generate_ac_config_files.sh deleted file mode 100755 index a264112..0000000 --- a/script/generate_ac_config_files.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -# echo a AC_CONFIG_FILES block containing a Makefile for every Makefile.am found -echo "AC_CONFIG_FILES([" -find . -name Makefile.am | sed 's/\.\///g' | sed 's/\.am//g' -echo "])" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 0000000..0bd9f8d --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,5 @@ +file(GLOB SOURCES "*/*.cpp") +add_library(ffnn SHARED ${SOURCES}) +target_link_libraries(ffnn "${GSL_LIBRARIES}" "${OpenMP_CXX_LIBRARIES}") # shared libs +add_library(ffnn_static STATIC ${SOURCES}) +target_link_libraries(ffnn_static "${GSL_LIBRARIES}" "${OpenMP_CXX_LIBRARIES}") # static (+ some shared) libs diff --git a/src/README.md b/src/README.md deleted file mode 100644 index 584f12c..0000000 --- a/src/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# src - -Here you find all source files of the library. To build the library, do `cd ../lib && make` or `cd ../ && make lib`. 
diff --git a/src/actf/ActivationFunctionManager.cpp b/src/actf/ActivationFunctionManager.cpp index 20add3d..6093c34 100644 --- a/src/actf/ActivationFunctionManager.cpp +++ b/src/actf/ActivationFunctionManager.cpp @@ -1,4 +1,4 @@ -#include "ActivationFunctionManager.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" #include @@ -12,6 +12,7 @@ namespace std_actf{ SELUActivationFunction selu_actf = SELUActivationFunction(); SRLUActivationFunction srlu_actf = SRLUActivationFunction(); SineActivationFunction sin_actf = SineActivationFunction(); + ExponentialActivationFunction exp_actf = ExponentialActivationFunction(); std::vector supported_actf = { &id_actf, @@ -21,7 +22,8 @@ namespace std_actf{ &relu_actf, &selu_actf, &srlu_actf, - &sin_actf + &sin_actf, + &exp_actf }; ActivationFunctionInterface * provideActivationFunction(const std::string &idCode, const std::string ¶ms){ diff --git a/src/actf/ExponentialActivationFunction.cpp b/src/actf/ExponentialActivationFunction.cpp new file mode 100644 index 0000000..137305a --- /dev/null +++ b/src/actf/ExponentialActivationFunction.cpp @@ -0,0 +1,38 @@ +#include "ffnn/actf/ExponentialActivationFunction.hpp" + +#include + + +// Activation Function Interface implementation + + +double ExponentialActivationFunction::f(const double &in) +{ + return exp(in); +} + + +double ExponentialActivationFunction::f1d(const double &in) +{ + return exp(in); +} + + +double ExponentialActivationFunction::f2d(const double &in) +{ + return exp(in); +} + + +double ExponentialActivationFunction::f3d(const double &in) +{ + return exp(in); +} + +void ExponentialActivationFunction::fad(const double &in, double &v, double &v1d, double &v2d, double &v3d, const bool flag_d1, const bool flag_d2, const bool flag_d3) +{ + v = exp(in); + v1d = flag_d1 ? v : 0.; + v2d = flag_d2 ? v : 0.; + v3d = flag_d3 ? 
v : 0.; +} diff --git a/src/actf/GaussianActivationFunction.cpp b/src/actf/GaussianActivationFunction.cpp index d540f3c..b2f0091 100644 --- a/src/actf/GaussianActivationFunction.cpp +++ b/src/actf/GaussianActivationFunction.cpp @@ -1,4 +1,4 @@ -#include "GaussianActivationFunction.hpp" +#include "ffnn/actf/GaussianActivationFunction.hpp" #include diff --git a/src/actf/LogisticActivationFunction.cpp b/src/actf/LogisticActivationFunction.cpp index b90601e..3712b8b 100644 --- a/src/actf/LogisticActivationFunction.cpp +++ b/src/actf/LogisticActivationFunction.cpp @@ -1,4 +1,4 @@ -#include "LogisticActivationFunction.hpp" +#include "ffnn/actf/LogisticActivationFunction.hpp" #include diff --git a/src/actf/ReLUActivationFunction.cpp b/src/actf/ReLUActivationFunction.cpp index fe84718..7320ba4 100644 --- a/src/actf/ReLUActivationFunction.cpp +++ b/src/actf/ReLUActivationFunction.cpp @@ -1,5 +1,5 @@ -#include "ReLUActivationFunction.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/actf/ReLUActivationFunction.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include diff --git a/src/actf/SELUActivationFunction.cpp b/src/actf/SELUActivationFunction.cpp index e9399fc..19682b9 100644 --- a/src/actf/SELUActivationFunction.cpp +++ b/src/actf/SELUActivationFunction.cpp @@ -1,5 +1,5 @@ -#include "SELUActivationFunction.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/actf/SELUActivationFunction.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include diff --git a/src/actf/SRLUActivationFunction.cpp b/src/actf/SRLUActivationFunction.cpp index 7a08c51..813c3be 100644 --- a/src/actf/SRLUActivationFunction.cpp +++ b/src/actf/SRLUActivationFunction.cpp @@ -1,4 +1,4 @@ -#include "SRLUActivationFunction.hpp" +#include "ffnn/actf/SRLUActivationFunction.hpp" #include diff --git a/src/actf/SineActivationFunction.cpp b/src/actf/SineActivationFunction.cpp index 9f1422c..50c5170 100644 --- a/src/actf/SineActivationFunction.cpp +++ 
b/src/actf/SineActivationFunction.cpp @@ -1,4 +1,4 @@ -#include "SineActivationFunction.hpp" +#include "ffnn/actf/SineActivationFunction.hpp" #include diff --git a/src/actf/TanSigmoidActivationFunction.cpp b/src/actf/TanSigmoidActivationFunction.cpp index c6e0465..bf3251d 100644 --- a/src/actf/TanSigmoidActivationFunction.cpp +++ b/src/actf/TanSigmoidActivationFunction.cpp @@ -1,4 +1,4 @@ -#include "TanSigmoidActivationFunction.hpp" +#include "ffnn/actf/TanSigmoidActivationFunction.hpp" #include diff --git a/src/feeder/FeederInterface.cpp b/src/feed/FeederInterface.cpp similarity index 92% rename from src/feeder/FeederInterface.cpp rename to src/feed/FeederInterface.cpp index 72b4f7e..dfc26f3 100644 --- a/src/feeder/FeederInterface.cpp +++ b/src/feed/FeederInterface.cpp @@ -1,7 +1,7 @@ -#include "FeederInterface.hpp" -#include "NetworkLayer.hpp" -#include "NetworkUnit.hpp" -#include "FedUnit.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/layer/NetworkLayer.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/unit/FedUnit.hpp" #include // --- Base Destructor @@ -108,8 +108,5 @@ bool FeederInterface::isVPIndexUsedInSources(const int &id) bool FeederInterface::isVPIndexUsedForFeeder(const int &id) { - if ( isVPIndexUsedInFeeder(id) || isVPIndexUsedInSources(id) ) { - return true; - } - else return false; + return ( isVPIndexUsedInFeeder(id) || isVPIndexUsedInSources(id) ); } diff --git a/src/feeder/NNRay.cpp b/src/feed/NNRay.cpp similarity index 65% rename from src/feeder/NNRay.cpp rename to src/feed/NNRay.cpp index a608f34..e09a6df 100644 --- a/src/feeder/NNRay.cpp +++ b/src/feed/NNRay.cpp @@ -1,6 +1,6 @@ -#include "NNRay.hpp" -#include "NetworkUnit.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/feed/NNRay.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include @@ -107,61 +107,49 @@ double NNRay::getSecondDerivativeFeed(const int &i2d){ double 
NNRay::getVariationalFirstDerivativeFeed(const int &iv1d){ - double feed = 0.; - - if (iv1d < _vp_id_shift+(int)_vp.size()) { + if (iv1d >= _vp_id_shift) { // if the variational parameter with index iv1d is in the ray add the following element - if (iv1d >= _vp_id_shift) { - feed += _sources[ iv1d - _vp_id_shift ]->getValue(); - } - else { - // add source components - for (size_t i=0; i<_map_index_to_sources[iv1d].size(); ++i) { - feed += _beta[_map_index_to_sources[iv1d][i]] * _sources[_map_index_to_sources[iv1d][i]]->getVariationalFirstDerivativeValue(iv1d); - } + return _sources[ iv1d - _vp_id_shift ]->getValue(); + } + else { + // else add source components + double feed = 0.; + for (size_t i=0; i<_map_index_to_sources[iv1d].size(); ++i) { + feed += _beta[_map_index_to_sources[iv1d][i]] * _sources[_map_index_to_sources[iv1d][i]]->getVariationalFirstDerivativeValue(iv1d); } + return feed; } - - return feed; } double NNRay::getCrossFirstDerivativeFeed(const int &i1d, const int &iv1d){ - double feed = 0.; - - if (iv1d < _vp_id_shift+(int)_vp.size()) { + if (iv1d >= _vp_id_shift) { // if the variational parameter with index iv1d is in the ray add the following element - if (iv1d >= _vp_id_shift) { - feed += _sources[ iv1d - _vp_id_shift ]->getFirstDerivativeValue(i1d); - } - else { - // add source components - for (size_t i=0; i<_map_index_to_sources[iv1d].size(); ++i) { - feed += _beta[_map_index_to_sources[iv1d][i]] * _sources[_map_index_to_sources[iv1d][i]]->getCrossFirstDerivativeValue(i1d, iv1d); - } + return _sources[ iv1d - _vp_id_shift ]->getFirstDerivativeValue(i1d); + } + else { + // else add source components + double feed = 0.; + for (size_t i=0; i<_map_index_to_sources[iv1d].size(); ++i) { + feed += _beta[_map_index_to_sources[iv1d][i]] * _sources[_map_index_to_sources[iv1d][i]]->getCrossFirstDerivativeValue(i1d, iv1d); } + return feed; } - - return feed; } double NNRay::getCrossSecondDerivativeFeed(const int &i2d, const int &iv2d){ - double feed = 0.; 
- - if (iv2d < _vp_id_shift+(int)_vp.size()) { + if (iv2d >= _vp_id_shift) { // if the variational parameter with index iv2d is in the ray add the following element - if (iv2d >= _vp_id_shift) { - feed += _sources[ iv2d - _vp_id_shift ]->getSecondDerivativeValue(i2d); - } - else { - // add source components - for (size_t i=0; i<_map_index_to_sources[iv2d].size(); ++i) { - feed += _beta[_map_index_to_sources[iv2d][i]] * _sources[_map_index_to_sources[iv2d][i]]->getCrossSecondDerivativeValue(i2d, iv2d); - } + return _sources[ iv2d - _vp_id_shift ]->getSecondDerivativeValue(i2d); + } + else { + // else add source components + double feed = 0.; + for (size_t i=0; i<_map_index_to_sources[iv2d].size(); ++i) { + feed += _beta[_map_index_to_sources[iv2d][i]] * _sources[_map_index_to_sources[iv2d][i]]->getCrossSecondDerivativeValue(i2d, iv2d); } + return feed; } - - return feed; } diff --git a/src/feeder/SmartBetaGenerator.cpp b/src/feed/SmartBetaGenerator.cpp similarity index 94% rename from src/feeder/SmartBetaGenerator.cpp rename to src/feed/SmartBetaGenerator.cpp index 07a7800..ca33019 100644 --- a/src/feeder/SmartBetaGenerator.cpp +++ b/src/feed/SmartBetaGenerator.cpp @@ -1,11 +1,11 @@ -#include "SmartBetaGenerator.hpp" +#include "ffnn/feed/SmartBetaGenerator.hpp" -#include "ActivationFunctionInterface.hpp" -#include "NetworkUnit.hpp" -#include "FedUnit.hpp" -#include "NNUnit.hpp" -#include "FeederInterface.hpp" -#include "NNRay.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/unit/NetworkUnit.hpp" +#include "ffnn/unit/FedUnit.hpp" +#include "ffnn/unit/NNUnit.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/feed/NNRay.hpp" #include #include @@ -191,7 +191,7 @@ namespace smart_beta{ vector beta_v; FeederInterface * rayj = L->getFedUnit(idx[j])->getFeeder(); for (int ib=BETA_INDEX_OFFSET; ibgetNBeta(); ++ib) beta_v.push_back(rayj->getBeta(ib)); - const double dot_product = abs(inner_product(begin(beta_u), end(beta_u), 
begin(beta_v), 0.0))/inner_product(begin(beta_u), end(beta_u), begin(beta_u), 0.0); + const double dot_product = fabs(inner_product(begin(beta_u), end(beta_u), begin(beta_v), 0.0))/inner_product(begin(beta_u), end(beta_u), begin(beta_u), 0.0); if (min_dot_product < 0.) min_dot_product = dot_product; if (dot_product < min_dot_product) min_dot_product = dot_product; } diff --git a/src/feeder/VariableFeeder.cpp b/src/feed/VariableFeeder.cpp similarity index 93% rename from src/feeder/VariableFeeder.cpp rename to src/feed/VariableFeeder.cpp index fa90c4d..ea9abb4 100644 --- a/src/feeder/VariableFeeder.cpp +++ b/src/feed/VariableFeeder.cpp @@ -1,4 +1,4 @@ -#include "VariableFeeder.hpp" +#include "ffnn/feed/VariableFeeder.hpp" #include @@ -85,9 +85,6 @@ bool VariableFeeder::getVariationalParameterValue(const int &id, double &value){ bool VariableFeeder::isVPIndexUsedInFeeder(const int &id) { - if ( _vp_id_shift <= id && id <_vp_id_shift+(int)_vp.size()) { - return true; - } - else return false; + return ( _vp_id_shift <= id && id <_vp_id_shift+(int)_vp.size() ); } diff --git a/src/feeder/WeightedFeeder.cpp b/src/feed/WeightedFeeder.cpp similarity index 97% rename from src/feeder/WeightedFeeder.cpp rename to src/feed/WeightedFeeder.cpp index 35265cf..4121f95 100644 --- a/src/feeder/WeightedFeeder.cpp +++ b/src/feed/WeightedFeeder.cpp @@ -1,4 +1,4 @@ -#include "WeightedFeeder.hpp" +#include "ffnn/feed/WeightedFeeder.hpp" // --- clear method diff --git a/src/feature_maps/EuclideanDistanceMap.cpp b/src/fmap/EuclideanDistanceMap.cpp similarity index 95% rename from src/feature_maps/EuclideanDistanceMap.cpp rename to src/fmap/EuclideanDistanceMap.cpp index cc3a5c8..5724fec 100644 --- a/src/feature_maps/EuclideanDistanceMap.cpp +++ b/src/fmap/EuclideanDistanceMap.cpp @@ -1,6 +1,6 @@ -#include "EuclideanDistanceMap.hpp" -#include "StringCodeUtilities.hpp" -#include "NetworkUnit.hpp" +#include "ffnn/fmap/EuclideanDistanceMap.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" 
+#include "ffnn/unit/NetworkUnit.hpp" #include #include @@ -90,7 +90,7 @@ double EuclideanDistanceMap::getFeedSigma() double sigma = 0.; for (size_t i=0; i<_ndim; ++i) { - sigma += 4.0 * srcv[i]*srcv[i] * pow(_sources[i]->getOutputSigma(), 2); // (d²/dx² sigmaX)² + sigma += 4.0 * pow(srcv[i] * _sources[i]->getOutputSigma(), 2); // (d/dx * sigmaX)² } return sqrt(sigma); diff --git a/src/feature_maps/EuclideanPairDistanceMap.cpp b/src/fmap/EuclideanPairDistanceMap.cpp similarity index 97% rename from src/feature_maps/EuclideanPairDistanceMap.cpp rename to src/fmap/EuclideanPairDistanceMap.cpp index fec11a8..816c3ea 100644 --- a/src/feature_maps/EuclideanPairDistanceMap.cpp +++ b/src/fmap/EuclideanPairDistanceMap.cpp @@ -1,6 +1,6 @@ -#include "EuclideanPairDistanceMap.hpp" -#include "StringCodeUtilities.hpp" -#include "NetworkUnit.hpp" +#include "ffnn/fmap/EuclideanPairDistanceMap.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" +#include "ffnn/unit/NetworkUnit.hpp" #include #include @@ -58,7 +58,7 @@ double EuclideanPairDistanceMap::getFeedSigma() double sigma = 0.; for (size_t i=0; i<_ndim; ++i) { - sigma += 2.0 * pow(srcv[i] - srcv[i+_ndim], 2) * ( pow(_sources[i]->getOutputSigma(), 2) + pow(_sources[i+_ndim]->getOutputSigma(), 2) ); // (d²/dx² sigmaX)² + sigma += 2.0 * pow(srcv[i] - srcv[i+_ndim], 2) * ( pow(_sources[i]->getOutputSigma(), 2) + pow(_sources[i+_ndim]->getOutputSigma(), 2) ); // (d/dx * sigmaX)² } return sqrt(sigma); diff --git a/src/feature_maps/FeatureMapLayer.cpp b/src/fmap/FeatureMapLayer.cpp similarity index 97% rename from src/feature_maps/FeatureMapLayer.cpp rename to src/fmap/FeatureMapLayer.cpp index 0d29fa3..35053ae 100644 --- a/src/feature_maps/FeatureMapLayer.cpp +++ b/src/fmap/FeatureMapLayer.cpp @@ -1,7 +1,7 @@ -#include "FeatureMapLayer.hpp" -#include "NetworkLayer.hpp" -#include "FedUnit.hpp" -#include "FeederInterface.hpp" +#include "ffnn/fmap/FeatureMapLayer.hpp" +#include "ffnn/layer/NetworkLayer.hpp" +#include 
"ffnn/unit/FedUnit.hpp" +#include "ffnn/feed/FeederInterface.hpp" #include diff --git a/src/feature_maps/IdentityMap.cpp b/src/fmap/IdentityMap.cpp similarity index 94% rename from src/feature_maps/IdentityMap.cpp rename to src/fmap/IdentityMap.cpp index af71ded..57bb13c 100644 --- a/src/feature_maps/IdentityMap.cpp +++ b/src/fmap/IdentityMap.cpp @@ -1,5 +1,5 @@ -#include "IdentityMap.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/fmap/IdentityMap.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include diff --git a/src/feature_maps/MultiDimStaticMap.cpp b/src/fmap/MultiDimStaticMap.cpp similarity index 93% rename from src/feature_maps/MultiDimStaticMap.cpp rename to src/fmap/MultiDimStaticMap.cpp index d710ceb..2fd9fa0 100644 --- a/src/feature_maps/MultiDimStaticMap.cpp +++ b/src/fmap/MultiDimStaticMap.cpp @@ -1,6 +1,6 @@ -#include "MultiDimStaticMap.hpp" -#include "StringCodeUtilities.hpp" -#include "NetworkUnit.hpp" +#include "ffnn/fmap/MultiDimStaticMap.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" +#include "ffnn/unit/NetworkUnit.hpp" #include #include diff --git a/src/feature_maps/OneDimStaticMap.cpp b/src/fmap/OneDimStaticMap.cpp similarity index 90% rename from src/feature_maps/OneDimStaticMap.cpp rename to src/fmap/OneDimStaticMap.cpp index 2ea884c..12989c0 100644 --- a/src/feature_maps/OneDimStaticMap.cpp +++ b/src/fmap/OneDimStaticMap.cpp @@ -1,6 +1,6 @@ -#include "OneDimStaticMap.hpp" -#include "StringCodeUtilities.hpp" -#include "NetworkUnit.hpp" +#include "ffnn/fmap/OneDimStaticMap.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" +#include "ffnn/unit/NetworkUnit.hpp" #include #include diff --git a/src/feature_maps/PairDifferenceMap.cpp b/src/fmap/PairDifferenceMap.cpp similarity index 95% rename from src/feature_maps/PairDifferenceMap.cpp rename to src/fmap/PairDifferenceMap.cpp index 64855a6..d6a7f13 100644 --- a/src/feature_maps/PairDifferenceMap.cpp +++ b/src/fmap/PairDifferenceMap.cpp @@ -1,5 +1,5 @@ -#include 
"PairDifferenceMap.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/fmap/PairDifferenceMap.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include diff --git a/src/feature_maps/PairSumMap.cpp b/src/fmap/PairSumMap.cpp similarity index 96% rename from src/feature_maps/PairSumMap.cpp rename to src/fmap/PairSumMap.cpp index ebc7b48..25c34aa 100644 --- a/src/feature_maps/PairSumMap.cpp +++ b/src/fmap/PairSumMap.cpp @@ -1,5 +1,5 @@ -#include "PairSumMap.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/fmap/PairSumMap.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include diff --git a/src/io/PrintUtilities.cpp b/src/io/PrintUtilities.cpp index c705dbf..a061eb5 100644 --- a/src/io/PrintUtilities.cpp +++ b/src/io/PrintUtilities.cpp @@ -1,7 +1,7 @@ -#include "PrintUtilities.hpp" -#include "FeederInterface.hpp" -#include "FedUnit.hpp" -#include "NNUnit.hpp" +#include "ffnn/io/PrintUtilities.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/unit/FedUnit.hpp" +#include "ffnn/unit/NNUnit.hpp" #include #include diff --git a/src/layer/FedLayer.cpp b/src/layer/FedLayer.cpp index 2c3cd08..d3d5eaf 100644 --- a/src/layer/FedLayer.cpp +++ b/src/layer/FedLayer.cpp @@ -1,6 +1,6 @@ -#include "FedLayer.hpp" -#include "FeederInterface.hpp" -#include "FedUnit.hpp" +#include "ffnn/layer/FedLayer.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/unit/FedUnit.hpp" #include @@ -113,3 +113,26 @@ void FedLayer::disconnect() _U_fed[i]->setFeeder(NULL); } } + + +// --- Compute Values (with OMP pragma) + +void FedLayer::computeValues() +{ +#ifdef OPENMP + // compile with -DOPENMP -fopenmp flags to use parallelization here + + if (this->getNUnits()>2) { +#pragma omp for schedule(static, 1) + for (std::vector::size_type i=0; i<_U.size(); ++i) _U[i]->computeValues(); + } + else { +#pragma omp single +#endif + + for (std::vector::size_type i=0; i<_U.size(); ++i) _U[i]->computeValues(); + +#ifdef OPENMP + } +#endif +} diff --git 
a/src/layer/InputLayer.cpp b/src/layer/InputLayer.cpp index dc1283c..5d542aa 100644 --- a/src/layer/InputLayer.cpp +++ b/src/layer/InputLayer.cpp @@ -1,5 +1,5 @@ -#include "InputLayer.hpp" -#include "InputUnit.hpp" +#include "ffnn/layer/InputLayer.hpp" +#include "ffnn/unit/InputUnit.hpp" // --- Register Unit diff --git a/src/layer/NNLayer.cpp b/src/layer/NNLayer.cpp index a7b4609..4f45946 100644 --- a/src/layer/NNLayer.cpp +++ b/src/layer/NNLayer.cpp @@ -1,8 +1,8 @@ -#include "NNLayer.hpp" +#include "ffnn/layer/NNLayer.hpp" -#include "ActivationFunctionInterface.hpp" -#include "ActivationFunctionManager.hpp" -#include "NNUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/unit/NNUnit.hpp" // --- Register Unit diff --git a/src/layer/NetworkLayer.cpp b/src/layer/NetworkLayer.cpp index 8453907..231de14 100644 --- a/src/layer/NetworkLayer.cpp +++ b/src/layer/NetworkLayer.cpp @@ -1,6 +1,6 @@ -#include "NetworkLayer.hpp" -#include "OffsetUnit.hpp" -#include "NetworkUnit.hpp" +#include "ffnn/layer/NetworkLayer.hpp" +#include "ffnn/unit/OffsetUnit.hpp" +#include "ffnn/unit/NetworkUnit.hpp" #include #include @@ -65,26 +65,35 @@ void NetworkLayer::setSize(const int &nunits) // --- Values to compute -void NetworkLayer::addCrossSecondDerivativeSubstrate(const int &nx0, const int &nvp) +void NetworkLayer::addCrossSecondDerivativeSubstrate(const int &nx0) { - for (std::vector::size_type i=0; i<_U.size(); ++i){ - _U[i]->setCrossSecondDerivativeSubstrate(nx0, nvp); + const int nvp = this->getMaxVariationalParameterIndex()+1; + if (nvp > 0) { + for (std::vector::size_type i=0; i<_U.size(); ++i){ + _U[i]->setCrossSecondDerivativeSubstrate(nx0, nvp); + } } } -void NetworkLayer::addCrossFirstDerivativeSubstrate(const int &nx0, const int &nvp) +void NetworkLayer::addCrossFirstDerivativeSubstrate(const int &nx0) { - for (std::vector::size_type i=0; i<_U.size(); ++i){ - 
_U[i]->setCrossFirstDerivativeSubstrate(nx0, nvp); + const int nvp = this->getMaxVariationalParameterIndex()+1; + if (nvp > 0) { + for (std::vector::size_type i=0; i<_U.size(); ++i){ + _U[i]->setCrossFirstDerivativeSubstrate(nx0, nvp); + } } } -void NetworkLayer::addVariationalFirstDerivativeSubstrate(const int &nvp) +void NetworkLayer::addVariationalFirstDerivativeSubstrate() { - for (std::vector::size_type i=0; i<_U.size(); ++i){ - _U[i]->setVariationalFirstDerivativeSubstrate(nvp); + const int nvp = this->getMaxVariationalParameterIndex()+1; + if (nvp > 0) { + for (std::vector::size_type i=0; i<_U.size(); ++i){ + _U[i]->setVariationalFirstDerivativeSubstrate(nvp); + } } } @@ -109,8 +118,8 @@ void NetworkLayer::addFirstDerivativeSubstrate(const int &nx0) void NetworkLayer::computeValues() { -#ifdef OPENMP -#pragma omp for schedule(static, 1) -#endif + #ifdef OPENMP + #pragma omp single // per default (FedLayer overwrites this method with omp for instead) + #endif for (std::vector::size_type i=0; i<_U.size(); ++i) _U[i]->computeValues(); } diff --git a/src/layer/OutputNNLayer.cpp b/src/layer/OutputNNLayer.cpp index df2711c..de630d9 100644 --- a/src/layer/OutputNNLayer.cpp +++ b/src/layer/OutputNNLayer.cpp @@ -1,8 +1,8 @@ -#include "OutputNNLayer.hpp" +#include "ffnn/layer/OutputNNLayer.hpp" -#include "ActivationFunctionInterface.hpp" -#include "ActivationFunctionManager.hpp" -#include "OutputNNUnit.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/unit/OutputNNUnit.hpp" // --- Register Unit diff --git a/src/network/FeedForwardNeuralNetwork.cpp b/src/net/FeedForwardNeuralNetwork.cpp similarity index 93% rename from src/network/FeedForwardNeuralNetwork.cpp rename to src/net/FeedForwardNeuralNetwork.cpp index a99a5f7..79e768c 100644 --- a/src/network/FeedForwardNeuralNetwork.cpp +++ b/src/net/FeedForwardNeuralNetwork.cpp @@ -1,8 +1,8 @@ -#include "FeedForwardNeuralNetwork.hpp" +#include 
"ffnn/net/FeedForwardNeuralNetwork.hpp" -#include "NNUnit.hpp" -#include "ActivationFunctionManager.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/unit/NNUnit.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include @@ -12,10 +12,6 @@ #include #include -#ifdef OPENMP -#include //to detect the number of hardware threads on the system -#endif - // --- Beta @@ -227,12 +223,10 @@ void FeedForwardNeuralNetwork::getVariationalParameter(double * vp) int ivp = 0; for (vector::size_type i=0; i<_L.size(); ++i) { int idmax = _L[i]->getMaxVariationalParameterIndex(); - if (ivp<=idmax) { - for ( ; ivpgetVariationalParameter(ivp, vp[ivp]); - if (!status) { - cout << endl << "ERROR FeedForwardNeuralNetwork::getVariationalParameter : index " << ivp << " not found in layer " << i << " with max index " << idmax << endl << endl; - } + for ( ; ivp<=idmax; ++ivp) { + bool status = _L[i]->getVariationalParameter(ivp, vp[ivp]); + if (!status) { + cout << endl << "ERROR FeedForwardNeuralNetwork::getVariationalParameter : index " << ivp << " not found in layer " << i << " with max index " << idmax << endl << endl; } } } @@ -247,8 +241,9 @@ void FeedForwardNeuralNetwork::setVariationalParameter(const int &ivp, const dou } for (vector::size_type i=0; i<_L.size(); ++i) { if (ivp<=_L[i]->getMaxVariationalParameterIndex()) { - _L[i]->setVariationalParameter(ivp, vp); - return; + bool status = _L[i]->setVariationalParameter(ivp, vp); + if (status) return; + else break; } } cout << endl << "ERROR FeedForwardNeuralNetwork::setVariationalParameter : index " << ivp << " not found" << endl << endl; @@ -262,12 +257,10 @@ void FeedForwardNeuralNetwork::setVariationalParameter(const double * vp) int ivp = 0; for (vector::size_type i=0; i<_L.size(); ++i) { int idmax = _L[i]->getMaxVariationalParameterIndex(); - if (ivp<=idmax) { - for ( ; ivp<=idmax; ++ivp) { - bool status = _L[i]->setVariationalParameter(ivp, vp[ivp]); - if 
(!status) { - cout << endl << "ERROR FeedForwardNeuralNetwork::setVariationalParameter : index " << ivp << " not found in layer " << i << " with max index " << idmax << endl << endl; - } + for ( ; ivp<=idmax; ++ivp) { + bool status = _L[i]->setVariationalParameter(ivp, vp[ivp]); + if (!status) { + cout << endl << "ERROR FeedForwardNeuralNetwork::setVariationalParameter : index " << ivp << " not found in layer " << i << " with max index " << idmax << endl << endl; } } } @@ -423,35 +416,15 @@ void FeedForwardNeuralNetwork::evaluate(const double * in, double * out, double } } -#ifdef OPENMP -bool compare_NUnits(NetworkLayer * A, NetworkLayer * B) { return A->getNUnits()getNUnits(); } -#endif - void FeedForwardNeuralNetwork::FFPropagate() { - _L_in->computeValues(); // OpenMP not worth for input layer - -#ifdef OPENMP -// compile with -DOPENMP -fopenmp flags to use parallelization here - - int nthreads = std::min( (int)std::thread::hardware_concurrency(), (*std::max_element(_L.begin()+1, _L.end(), compare_NUnits))->getNUnits() - 1 ); - if (nthreads>1) { -#pragma omp parallel num_threads(nthreads) - for (std::vector::size_type i=1; i<_L.size(); ++i) - { - _L[i]->computeValues(); // actual omp for inside computeValues -#pragma omp barrier // just to be sure - } - } - else { -#endif - for (std::vector::size_type i=1; i<_L.size(); ++i) + #ifdef OPENMP + #pragma omp parallel default(none) + #endif + for (std::vector::size_type i=0; i<_L.size(); ++i) { _L[i]->computeValues(); } -#ifdef OPENMP - } -#endif } @@ -488,7 +461,7 @@ void FeedForwardNeuralNetwork::addCrossSecondDerivativeSubstrate() // set the substrate in the units for (std::vector::size_type i=0; i<_L.size(); ++i){ - _L[i]->addCrossSecondDerivativeSubstrate(getNInput(), _nvp); + _L[i]->addCrossSecondDerivativeSubstrate(getNInput()); } _flag_c2d = true; @@ -508,7 +481,7 @@ void FeedForwardNeuralNetwork::addCrossFirstDerivativeSubstrate() // set the substrate in the units for (std::vector::size_type i=0; 
i<_L.size(); ++i) { - _L[i]->addCrossFirstDerivativeSubstrate(getNInput(), _nvp); + _L[i]->addCrossFirstDerivativeSubstrate(getNInput()); } _flag_c1d = true; @@ -522,7 +495,7 @@ void FeedForwardNeuralNetwork::addVariationalFirstDerivativeSubstrate() // set the substrate in the units for (std::vector::size_type i=0; i<_L.size(); ++i) { - _L[i]->addVariationalFirstDerivativeSubstrate(_nvp); + _L[i]->addVariationalFirstDerivativeSubstrate(); } _flag_v1d = true; diff --git a/src/serialize/StringCodeUtilities.cpp b/src/serial/StringCodeUtilities.cpp similarity index 99% rename from src/serialize/StringCodeUtilities.cpp rename to src/serial/StringCodeUtilities.cpp index ffec457..91a3581 100644 --- a/src/serialize/StringCodeUtilities.cpp +++ b/src/serial/StringCodeUtilities.cpp @@ -1,4 +1,4 @@ -#include "StringCodeUtilities.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include diff --git a/src/trainer/NNTrainer.cpp b/src/train/NNTrainer.cpp similarity index 95% rename from src/trainer/NNTrainer.cpp rename to src/train/NNTrainer.cpp index 794493d..d5123db 100644 --- a/src/trainer/NNTrainer.cpp +++ b/src/train/NNTrainer.cpp @@ -1,8 +1,9 @@ -#include "NNTrainer.hpp" -#include "SmartBetaGenerator.hpp" +#include "ffnn/train/NNTrainer.hpp" +#include "ffnn/feed/SmartBetaGenerator.hpp" #include #include +#include // --- Helpers @@ -129,6 +130,12 @@ void NNTrainer::bestFit(FeedForwardNeuralNetwork * const ffnn, double * bestfit, while(true) { // initial parameters if (flag_smart_beta) smart_beta::generateSmartBeta(ffnn); + else if (ffnn->getNFeatureMapLayers() > 0) { // hack because of fitting problems when using FMLs + random_device rdev; + mt19937_64 rgen = std::mt19937_64(rdev()); + uniform_real_distribution rd(-0.1,0.1); + for (int i=0; igetNBeta(); ++i) ffnn->setBeta(i, rd(rgen)); + } else ffnn->randomizeBetas(); findFit(ffnn, fit, err, verbose); // try new fit diff --git a/src/trainer/NNTrainerGSL.cpp b/src/train/NNTrainerGSL.cpp similarity index 99% rename 
from src/trainer/NNTrainerGSL.cpp rename to src/train/NNTrainerGSL.cpp index 22fd6aa..371d2c0 100644 --- a/src/trainer/NNTrainerGSL.cpp +++ b/src/train/NNTrainerGSL.cpp @@ -1,4 +1,4 @@ -#include "NNTrainerGSL.hpp" +#include "ffnn/train/NNTrainerGSL.hpp" #include #include diff --git a/src/unit/ActivationUnit.cpp b/src/unit/ActivationUnit.cpp index 445623f..2d70213 100644 --- a/src/unit/ActivationUnit.cpp +++ b/src/unit/ActivationUnit.cpp @@ -1,6 +1,6 @@ -#include "ActivationUnit.hpp" -#include "StringCodeUtilities.hpp" -#include "ActivationFunctionManager.hpp" +#include "ffnn/unit/ActivationUnit.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" #include diff --git a/src/unit/FedUnit.cpp b/src/unit/FedUnit.cpp index 652dd46..28f0335 100644 --- a/src/unit/FedUnit.cpp +++ b/src/unit/FedUnit.cpp @@ -1,14 +1,15 @@ -#include "FedUnit.hpp" -#include "StringCodeUtilities.hpp" +#include "ffnn/unit/FedUnit.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include // for NULL - // --- Computation void FedUnit::computeFeed(){ if (_feeder){ + const int mynvp = _feeder->getMaxVariationalParameterIndex()+1; + // unit value _pv = _feeder->getFeed(); @@ -22,17 +23,17 @@ void FedUnit::computeFeed(){ } if (_first_var_der) { - for (int j=0; j<_nvp; ++j) _first_var_der[j] = _feeder->getVariationalFirstDerivativeFeed(j); + for (int j=0; jgetVariationalFirstDerivativeFeed(j); } if (_cross_first_der) { - for (int j=0; j<_nvp; ++j) { + for (int j=0; jgetCrossFirstDerivativeFeed(i, j); } } if (_cross_second_der) { - for (int j=0; j<_nvp; ++j) { + for (int j=0; jgetCrossSecondDerivativeFeed(i, j); } } @@ -42,6 +43,8 @@ void FedUnit::computeFeed(){ void FedUnit::computeDerivatives(){ if (_feeder) { + const int mynvp = _feeder->getMaxVariationalParameterIndex()+1; + // first derivative if (_v1d){ for (int i=0; i<_nx0; ++i) @@ -58,7 +61,7 @@ void FedUnit::computeDerivatives(){ } // variational first derivative if (_v1vd){ - for 
(int i=0; i<_nvp; ++i) + for (int i=0; i // for NULL diff --git a/src/unit/OutputNNUnit.cpp b/src/unit/OutputNNUnit.cpp index b81bcab..5a14d0a 100644 --- a/src/unit/OutputNNUnit.cpp +++ b/src/unit/OutputNNUnit.cpp @@ -1,4 +1,4 @@ -#include "OutputNNUnit.hpp" +#include "ffnn/unit/OutputNNUnit.hpp" void OutputNNUnit::setOutputBounds(const double &lbound, const double &ubound) { diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt new file mode 100644 index 0000000..c1e07d1 --- /dev/null +++ b/test/CMakeLists.txt @@ -0,0 +1,25 @@ +include_directories(common/) +link_libraries(ffnn) + +add_executable(check main.cpp) +add_executable(ut1.exe ut1/main.cpp) +add_executable(ut2.exe ut2/main.cpp) +add_executable(ut3.exe ut3/main.cpp) +add_executable(ut4.exe ut4/main.cpp) +add_executable(ut5.exe ut5/main.cpp) +add_executable(ut6.exe ut6/main.cpp) +add_executable(ut7.exe ut7/main.cpp) +add_executable(ut8.exe ut8/main.cpp) +add_executable(ut9.exe ut9/main.cpp) +add_executable(ut10.exe ut10/main.cpp) + +add_test(ut1 ut1.exe) +add_test(ut2 ut2.exe) +add_test(ut3 ut3.exe) +add_test(ut4 ut4.exe) +add_test(ut5 ut5.exe) +add_test(ut6 ut6.exe) +add_test(ut7 ut7.exe) +add_test(ut8 ut8.exe) +add_test(ut9 ut9.exe) +add_test(ut10 ut10.exe) diff --git a/test/Makefile.am b/test/Makefile.am deleted file mode 100644 index 037128f..0000000 --- a/test/Makefile.am +++ /dev/null @@ -1,14 +0,0 @@ -include test.am - -SUBDIRS = ut1 ut2 ut3 ut4 ut5 ut6 ut7 ut8 ut9 ut10 - -TESTS = exe ut1/exe ut2/exe ut3/exe ut4/exe ut5/exe ut6/exe ut7/exe ut8/exe ut9/exe ut10/exe -AUTOMAKE_OPTIONS = parallel-tests -if !OPENMP -if VALGRIND_ENABLED - @VALGRIND_CHECK_RULES@ - LOG_COMPILER = $(LIBTOOL) e $(VALGRIND) --leak-check=full --track-origins=yes -else - LOG_COMPILER = $(LIBTOOL) e -endif -endif diff --git a/test/README.md b/test/README.md index 8f61fe0..7ccfcd5 100644 --- a/test/README.md +++ b/test/README.md @@ -1,6 +1,7 @@ # LEGEND OF THE UNIT TESTS -Use `make check` either inside test directory (i.e. 
run all tests) or in a specific unittest folder. +Use `./run.sh` inside the test directory to run the check program and unit tests +with valgrind or use `make test` inside the build directory, to run unit tests without valgrind. ## Unit Test 1 @@ -54,3 +55,9 @@ Use `make check` either inside test directory (i.e. run all tests) or in a speci ## Unit Test 9 `ut9/`: check that the trainers find perfect fits for a target function resembling a NN + + + +## Unit Test 10 + +`ut10/`: check the derivatives and file storing when feature maps are used diff --git a/test/common/checkDerivatives.hpp b/test/common/checkDerivatives.hpp index 7d8af8c..ee41d71 100644 --- a/test/common/checkDerivatives.hpp +++ b/test/common/checkDerivatives.hpp @@ -2,7 +2,7 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" void checkDerivatives(FeedForwardNeuralNetwork * const ffnn, const double &TINY) { diff --git a/test/common/checkStoreOnFile.hpp b/test/common/checkStoreOnFile.hpp index 59487ca..c365457 100644 --- a/test/common/checkStoreOnFile.hpp +++ b/test/common/checkStoreOnFile.hpp @@ -2,7 +2,7 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" // expects a neural network without substrates but optionally with connection void checkStoreOnFile(FeedForwardNeuralNetwork * const ffnn, const bool isConnected = false) diff --git a/test/main.cpp b/test/main.cpp index 3c9a284..c8c8679 100644 --- a/test/main.cpp +++ b/test/main.cpp @@ -2,7 +2,7 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" void printNNStructure(FeedForwardNeuralNetwork &nn) diff --git a/test/run.sh b/test/run.sh new file mode 100755 index 0000000..cdd0266 --- /dev/null +++ b/test/run.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +VALGRIND="valgrind --leak-check=full --track-origins=yes" + +cd ../build/test/ +${VALGRIND} ./check +for exe in ./ut*.exe; do + echo + 
echo "Running test ${exe}..." + ${VALGRIND} ${exe} + echo +done diff --git a/test/test.am b/test/test.am deleted file mode 100644 index 34ac8d3..0000000 --- a/test/test.am +++ /dev/null @@ -1,14 +0,0 @@ -AM_LDFLAGS += -lffnn - -if !DEBUG # then we want to add debug flags manually - AM_CXXFLAGS += $(DEBUGFLAGS) -endif - -noinst_PROGRAMS = exe -exe_SOURCES = main.cpp - -clean-local: - rm -f *.txt -if VALGRIND_ENABLED - rm -f vgcore.* -endif diff --git a/test/ut1/Makefile.am b/test/ut1/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut1/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut1/main.cpp b/test/ut1/main.cpp index 0848b5d..f11b58e 100644 --- a/test/ut1/main.cpp +++ b/test/ut1/main.cpp @@ -1,7 +1,7 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" #include "../common/checkDerivatives.hpp" int main(){ diff --git a/test/ut10/Makefile.am b/test/ut10/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut10/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut10/main.cpp b/test/ut10/main.cpp index f2eaaf0..1be7171 100644 --- a/test/ut10/main.cpp +++ b/test/ut10/main.cpp @@ -1,8 +1,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/io/PrintUtilities.hpp" #include "../common/checkDerivatives.hpp" #include "../common/checkStoreOnFile.hpp" @@ -21,7 +21,8 @@ int main() ffnn->pushFeatureMapLayer(4); ffnn->getFeatureMapLayer(1)->setNMaps(1, 1, 0, 1, 0); // we specify only 3 units - ffnn->getFeatureMapLayer(1)->setSize(6); // now the other 2 should be defaulted to IDMU + ffnn->getFeatureMapLayer(1)->setSize(6); // now the other 2 should be defaulted to IDMU (generates warning) + ffnn->getFeatureMapLayer(1)->setNMaps(1, 1, 0, 1, 2); // to suppress further warning on copies //printFFNNStructure(ffnn); diff 
--git a/test/ut2/Makefile.am b/test/ut2/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut2/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut2/main.cpp b/test/ut2/main.cpp index a3eeb73..1d6cc0c 100644 --- a/test/ut2/main.cpp +++ b/test/ut2/main.cpp @@ -1,5 +1,5 @@ -#include "FeedForwardNeuralNetwork.hpp" -#include "ActivationFunctionManager.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" #include "../common/checkStoreOnFile.hpp" diff --git a/test/ut3/Makefile.am b/test/ut3/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut3/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut3/main.cpp b/test/ut3/main.cpp index bd18173..3789ebd 100644 --- a/test/ut3/main.cpp +++ b/test/ut3/main.cpp @@ -2,9 +2,9 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "ActivationFunctionManager.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/io/PrintUtilities.hpp" diff --git a/test/ut4/Makefile.am b/test/ut4/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut4/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut4/main.cpp b/test/ut4/main.cpp index 3a00032..408fe0d 100644 --- a/test/ut4/main.cpp +++ b/test/ut4/main.cpp @@ -2,9 +2,9 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "ActivationFunctionManager.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/io/PrintUtilities.hpp" diff --git a/test/ut5/Makefile.am b/test/ut5/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut5/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut5/main.cpp b/test/ut5/main.cpp index 1fb363b..d9ed5ed 
100644 --- a/test/ut5/main.cpp +++ b/test/ut5/main.cpp @@ -1,5 +1,5 @@ -#include "ActivationFunctionManager.hpp" -#include "ActivationFunctionInterface.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/actf/ActivationFunctionInterface.hpp" #include #include @@ -10,13 +10,15 @@ int main(){ using namespace std; - const double TINY = 0.0001; + const double TINY_DEFAULT = 0.0001; const double dx = 0.0001; vector x_to_test = {-3., -2.5, -2., -1.5, -1.0, -0.5, -0.25, -0.001, 0.001, 0.25, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; for (ActivationFunctionInterface * actf : std_actf::supported_actf){ - // cout << "actf = " << actf->getIdCode() << endl; + const double TINY = actf->getIdCode() == "EXP" ? 0.002 : TINY_DEFAULT; + // cout << "actf = " << actf->getIdCode() << endl; + for (double x : x_to_test){ // cout << " x = " << x << endl; const double f = actf->f(x); @@ -54,7 +56,7 @@ int main(){ // cout << " f3d = " << f3d << endl; // cout << " num_f3d = " << num_f3d << endl; - assert( abs(num_f3d-f3d) < TINY*20. 
); + assert( abs(num_f3d-f3d) < TINY*20 ); // -- check the fad function diff --git a/test/ut6/Makefile.am b/test/ut6/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut6/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut6/main.cpp b/test/ut6/main.cpp index f96b76a..1a64aa0 100644 --- a/test/ut6/main.cpp +++ b/test/ut6/main.cpp @@ -1,4 +1,4 @@ -#include "StringCodeUtilities.hpp" +#include "ffnn/serial/StringCodeUtilities.hpp" #include #include diff --git a/test/ut7/Makefile.am b/test/ut7/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut7/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut7/main.cpp b/test/ut7/main.cpp index d669b95..1b7d3e5 100644 --- a/test/ut7/main.cpp +++ b/test/ut7/main.cpp @@ -5,12 +5,12 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "SmartBetaGenerator.hpp" -#include "ActivationFunctionManager.hpp" -#include "FedUnit.hpp" -#include "FeederInterface.hpp" -#include "PrintUtilities.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/feed/SmartBetaGenerator.hpp" +#include "ffnn/actf/ActivationFunctionManager.hpp" +#include "ffnn/unit/FedUnit.hpp" +#include "ffnn/feed/FeederInterface.hpp" +#include "ffnn/io/PrintUtilities.hpp" int main(){ diff --git a/test/ut8/Makefile.am b/test/ut8/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut8/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include ../test.am diff --git a/test/ut8/main.cpp b/test/ut8/main.cpp index fbe4eed..9a81e8d 100644 --- a/test/ut8/main.cpp +++ b/test/ut8/main.cpp @@ -1,5 +1,5 @@ -#include "FeedForwardNeuralNetwork.hpp" -#include "NNTrainerGSL.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/train/NNTrainerGSL.hpp" #include #include diff --git a/test/ut9/Makefile.am b/test/ut9/Makefile.am deleted file mode 100644 index 5d916f2..0000000 --- a/test/ut9/Makefile.am +++ /dev/null @@ -1 +0,0 @@ -include 
../test.am diff --git a/test/ut9/main.cpp b/test/ut9/main.cpp index 3a4e206..e6b52bb 100644 --- a/test/ut9/main.cpp +++ b/test/ut9/main.cpp @@ -3,8 +3,8 @@ #include #include -#include "FeedForwardNeuralNetwork.hpp" -#include "NNTrainerGSL.hpp" +#include "ffnn/net/FeedForwardNeuralNetwork.hpp" +#include "ffnn/train/NNTrainerGSL.hpp" using namespace std; using namespace nn_trainer_gsl_details; // to access hidden NNTrainerGSL methods