diff --git a/.github/workflows/dbg_smoke.yml b/.github/workflows/dbg_smoke.yml new file mode 100644 index 000000000..8fb2795ce --- /dev/null +++ b/.github/workflows/dbg_smoke.yml @@ -0,0 +1,42 @@ + +name: debug-smoke-tests + +on: [push] + +env: + BUILD_TYPE: Debug + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: Install required packages + run: sudo apt-get install -y libnuma-dev + + - name: Configure + run: mkdir build && cd build && ../bootstrap.sh --prefix=../install --debug-build + + - name: Build + working-directory: ${{github.workspace}}/build + run: make -j4 + + - name: Install + working-directory: ${{github.workspace}}/build + run: make -j4 install + + - name: Test + working-directory: ${{github.workspace}}/build + run: make -j4 smoketests &> smoketests.log + + - name: Check + working-directory: ${{github.workspace}}/build + run: ../tests/summarise.sh smoketests.log + + - name: DumpLogOnFailure + if: failure() + working-directory: ${{github.workspace}}/build + run: cat smoketests.log + diff --git a/.github/workflows/smoke.yml b/.github/workflows/smoke.yml index c302ebdb2..884b2f74f 100644 --- a/.github/workflows/smoke.yml +++ b/.github/workflows/smoke.yml @@ -1,5 +1,5 @@ -name: smoke-tests +name: release-smoke-tests on: [push] @@ -35,3 +35,8 @@ jobs: working-directory: ${{github.workspace}}/build run: ../tests/summarise.sh smoketests.log + - name: DumpLogOnFailure + if: failure() + working-directory: ${{github.workspace}}/build + run: cat smoketests.log + diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c6a81c040..60b3410d6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,7 @@ stages: # exclude: # - build/**/*.o # - build/**/*.o.d -# expire_in: 30 minutes +# expire_in: 80 minutes #build_debug_centos_8: @@ -122,13 +122,16 @@ build_test: - apt update && apt -y install make cmake libnuma-dev coreutils script: - mkdir -p install build && cd ./build && ../bootstrap.sh --prefix=../install && make 
-j$(nproc) build_tests_all + - strip -s $(find tests/unit/ -type f -executable -print) $(find tests/smoke/ -type f -executable -print) $(find tests/performance/ -type f -executable -print) artifacts: paths: - build/ exclude: - build/**/*.o - build/**/*.o.d - expire_in: 30 minutes + - build/**/CMakeFiles + - build/**/*.dir + expire_in: 80 minutes build_debug2_tests: @@ -222,7 +225,7 @@ build_debug: exclude: - build/**/*.o - build/**/*.o.d - expire_in: 30 minutes + expire_in: 43 minutes test_smoke_debug: diff --git a/CMakeLists.txt b/CMakeLists.txt index a7dc72dd2..344216e50 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,7 +27,7 @@ cmake_minimum_required( VERSION 3.13 ) set( MAJORVERSION 0 ) -set( MINORVERSION 6 ) +set( MINORVERSION 7 ) set( BUGVERSION 0 ) set( VERSION "${MAJORVERSION}.${MINORVERSION}.${BUGVERSION}" ) @@ -51,6 +51,8 @@ endif() # to choose backends and dependencies option( WITH_REFERENCE_BACKEND "With Reference backend" ON ) option( WITH_OMP_BACKEND "With OMP backend" ON ) +option( WITH_HYPERDAGS_BACKEND "With Hyperdags backend" ON ) +option( WITH_NONBLOCKING_BACKEND "With Nonblocking backend" ON ) option( WITH_NUMA "With NUMA support" ON ) option( LPF_INSTALL_PATH "Path to the LPF tools for the BSP1D and Hybrid backends" OFF ) # the following options depend on LPF_INSTALL_PATH being set @@ -61,6 +63,9 @@ LPF_INSTALL_PATH set)" ON LPF_INSTALL_PATH OFF cmake_dependent_option( WITH_HYBRID_BACKEND "Also build the Hybrid backend \ (needs LPF_INSTALL_PATH set)" ON LPF_INSTALL_PATH OFF ) +# other dependent options +cmake_dependent_option( WITH_HYPERDAGS_BACKEND "Building the Hyperdags backend needs \ + WITH_HYPERDAGS_USING set" ON WITH_HYPERDAGS_USING OFF ) # to customize build flags for either backends or tests option( COMMON_COMPILE_DEFINITIONS "Compilation definitions for BOTH backends and tests; they override the defaults" @@ -117,6 +122,7 @@ endif() if( NOT WITH_REFERENCE_BACKEND AND NOT WITH_OMP_BACKEND AND + NOT WITH_NONBLOCKING_BACKEND AND 
NOT WITH_BSP1D_BACKEND AND NOT WITH_HYBRID_BACKEND ) message( FATAL_ERROR "At least one backend should be enabled") @@ -188,13 +194,18 @@ endif() # by default no headers are built set( WITH_REFERENCE_BACKEND_HEADERS OFF ) set( WITH_OMP_BACKEND_HEADERS OFF ) +set( WITH_HYPERDAGS_BACKEND_HEADERS OFF ) # activate headers based on requested backends -if( WITH_REFERENCE_BACKEND OR WITH_BSP1D_BACKEND ) - # both reference and bsp1d backends need reference headers +if( WITH_REFERENCE_BACKEND OR WITH_BSP1D_BACKEND OR WITH_NONBLOCKING_BACKEND ) + # reference, bsp1d and nonblocking backends need reference headers set( WITH_REFERENCE_BACKEND_HEADERS ON ) endif() +if( WITH_HYPERDAGS_BACKEND ) + set( WITH_HYPERDAGS_BACKEND_HEADERS ON ) +endif() + if( WITH_OMP_BACKEND OR WITH_HYBRID_BACKEND ) # both reference_omp and hynrid backends need reference headers set( WITH_OMP_BACKEND_HEADERS ON ) @@ -218,13 +229,28 @@ add_subdirectory( examples ) ### DOXYGEN DOCUMENTATION GENERATION -set( DOCS_DIR "${PROJECT_SOURCE_DIR}/docs/code" ) +set( DOCS_DIR "${PROJECT_SOURCE_DIR}/docs/developer" ) add_custom_command( OUTPUT "${DOCS_DIR}" - COMMAND bash -c "if [[ ! 
-d docs/code ]]; then doxygen docs/doxy.conf &> doxygen.log; fi" + COMMAND bash -c "doxygen docs/doxy.conf &> doxygen-developer.log;" WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" DEPENDS "${PROJECT_SOURCE_DIR}/docs/doxy.conf" COMMENT "producing code documentation in ${DOCS_DIR}" VERBATIM #USES_TERMINAL ) -add_custom_target( docs DEPENDS "${DOCS_DIR}" ) +add_custom_target( devdocs DEPENDS "${DOCS_DIR}" ) + +set( PUBLIC_DOCS_DIR "${PROJECT_SOURCE_DIR}/docs/user" ) +add_custom_command( OUTPUT "${PUBLIC_DOCS_DIR}" + COMMAND bash -c "doxygen docs/user.conf &> doxygen-user.log;" + WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}" + DEPENDS "${PROJECT_SOURCE_DIR}/docs/user.conf" + COMMENT "producing public code documentation in ${PUBLIC_DOCS_DIR}" + VERBATIM +) +add_custom_target( userdocs DEPENDS "${PUBLIC_DOCS_DIR}" ) +add_custom_target( docs ) +add_dependencies( docs userdocs devdocs ) + +message( "Compiling with the following backends: ${AVAILABLE_BACKENDS}\n" ) + diff --git a/NOTICE b/NOTICE index 3f1bf625d..3c370eca4 100644 --- a/NOTICE +++ b/NOTICE @@ -29,6 +29,8 @@ to Huawei Technologies Co., Ltd. or one of its subsidiaries: - Auke Booij, Huawei Technologies Switzerland AG; 2021. + - Anders Hansson, Huawei Technologies Switzerland AG; 2022-2023. + The experimental banshee backend has been developed in collaboration with Prof. Luca Benini at ETH Zuerich and his group. In particular this backend is with great thanks due to Dan, Paul Scheffler, Fabian Schuiki, and Samuel diff --git a/README.md b/README.md index ae65c9547..ff0b89d1e 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@
-   _____  .____   __________      /\   ________                    .__   __________.____       _____    _________
-  /  _  \ |    |  \______   \    / /  /  _____/___________  ______ |  |__\______   \    |     /  _  \  /   _____/
- /  /_\  \|    |   |     ___/   / /  /   \  __\_  __ \__  \ \____ \|  |  \|    |  _/    |    /  /_\  \ \_____  \
-/    |    \    |___|    |      / /   \    \_\  \  | \// __ \|  |_> >   Y  \    |   \    |___/    |    \/        \
-\____|__  /_______ \____|     / /     \______  /__|  (____  /   __/|___|  /______  /_______ \____|__  /_______  /
-        \/        \/          \/             \/           \/|__|        \/       \/        \/       \/        \/
+   _____  .____   __________
+  /  _  \ |    |  \______   \
+ /  /_\  \|    |   |     ___/
+/    |    \    |___|    |
+\____|__  /_______ \____|
+        \/        \/
 
   Copyright 2021 Huawei Technologies Co., Ltd.
 
@@ -22,6 +22,31 @@ limitations under the License.
 
+This distribution contains the C++ Algebraic Programming (ALP) framework, and +provides the ALP/GraphBLAS, ALP/Pregel, and Sparse BLAS programming interfaces. +Only a subset of Sparse BLAS functionality is supported, at present. + +This distribution contains ALP backends that generate: + - sequential programs, + - shared-memory auto-parallelised programs, + - nonblocking shared-memory auto-parallelised programs, and + - sequential programs that generate HyperDAG representations of the executed + ALP program. + +Additional backends may optionally be enabled by providing their dependences. +Those backends generate: + - distributed-memory auto-parallelised programs, + - hybrid shared- and distributed-memory auto-parallelised programs, and + - sequential programs for the Banshee RISC-V Snitch Core simulator + (experimental). + +All backends automatically generate vectorised programs, amongst other +automatically-applied optimisations. + +The ALP/GraphBLAS and ALP/Pregel interfaces are enabled for all backends, while +the standard Sparse BLAS APIs only allow for the efficient support of the +sequential and shared-memory parallel backends. + # Minimal requirements @@ -31,7 +56,7 @@ libraries and programs, using its `reference` and `reference_omp` backends. ## Compilation -To compile ALP/GraphBLAS, you need the following tools: +To compile ALP, you need the following tools: 1. A C++11-capable compiler such as GCC 4.8.2 or higher, with OpenMP support 2. LibNUMA development headers @@ -40,7 +65,7 @@ To compile ALP, you need the following tools: (CMake's default build tool on UNIX systems) or any other supported build tool. ## Linking and run-time -The ALP/GraphBLAS libraries link against the following libraries: +The ALP libraries link against the following libraries: 1. LibNUMA: `-lnuma` 2. Standard math library: `-lm` @@ -60,15 +85,15 @@ of the LPF core library and its collectives library. 
The LPF library has its further dependences, which are all summarised on the LPF project page: * [Gitee](https://gitee.com/CSL-ALP/lpf); -* [Github](https://github.com/Algebraic-Programming/LPF). +* [GitHub](https://github.com/Algebraic-Programming/LPF). -The dependence on LPF applies to compilation, linking, and run-time. Fulfulling +The dependence on LPF applies to compilation, linking, and run-time. Fulfilling the dependence enables the `bsp1d` and `hybrid` ALP/GraphBLAS backends. ## Code documentation For generating the code documentations: -* `doyxgen` reads code comments and generates the documentation; +* `doxygen` reads code comments and generates the documentation; * `graphviz` generates various diagrams for inheritance, call paths, etc.; * `pdflatex` is required to build the PDF file out of the Latex generated documentation. @@ -76,12 +101,12 @@ For generating the code documentations: # Very quick start -Here are example steps to compile and install ALP/GraphBLAS for shared-memory -machines, without distributed-memory support. The last three commands show-case -the compilation and execution of the `sp.cpp` example program. +Here are example steps to compile and install ALP for shared-memory machines +without distributed-memory support. The last three commands show-case the +compilation and execution of the `sp.cpp` example program. ```bash -cd +cd mkdir build cd build ../bootstrap.sh --prefix=../install @@ -101,35 +126,37 @@ In more detail, the steps to follow are: that `config::SIMD_SIZE::bytes` defined in that file is set correctly with respect to the target architecture. -2. Create an empty directory for building ALP/GraphBLAS and move into it: +2. Create an empty directory for building ALP and move into it: `mkdir build && cd build`. -3. Invoke the `bootstrap.sh` script located inside the ALP/GraphBLAS root directory - `` to generate the build infrastructure via CMake inside the - current directory: +3. 
Invoke the `bootstrap.sh` script located inside the ALP root directory + `` to generate the build infrastructure via CMake inside the + current directory: - `/bootstrap.sh --prefix=` + `/bootstrap.sh --prefix=` - note: add `--with-lpf=/path/to/lpf/install/dir` if you have LPF installed and would like to use it. -4. Issue `make -j` to compile the C++11 ALP/GraphBLAS library for the configured - backends. +4. Issue `make -j` to compile the C++11 ALP library for the configured backends. 5. (*Optional*) To later run all unit tests, several datasets must be made - available. Please run the `/tools/downloadDatasets.sh` + available. Please run the `/tools/downloadDatasets.sh` script for a. an overview of datasets required for the basic tests, as well as b. the option to automatically download them. -6. (*Optional*) To make the ALP/GraphBLAS documentation, issue `make docs`. This +6. (*Optional*) To make the ALP documentation, issue `make userdocs`. This generates both - a. a PDF in `/docs/code/latex/refman.pdf`, and + a. LaTeX in `/docs/user/latex/refman.tex`, and + + b. HTML in `/docs/user/html/index.html`. - b. HTML in `/docs/code/html/index.html`. + To build a PDF from the LaTeX sources, cd into the directory mentioned, and + issue `make`. 7. (*Optional*) Issue `make -j smoketests` to run a quick set of functional tests. Please scan the output for any failed tests. @@ -138,20 +165,20 @@ In more detail, the steps to follow are: the default command lines the tests script uses are likely wrong. In this case, please edit `tests/parse_env.sh` by searching for the MPI implementation you used, and uncomment the lines directly below each - occurance. + occurrence. 8. (*Optional*) Issue `make -j unittests` to run an exhaustive set of unit tests. Please scan the output for any failed tests. If you do this with LPF enabled, please edit `tests/parse_env.sh` if required as described in step 5. -9. 
Issue `make -j install` to install ALP/GraphBLAS into your -install directory configured during step 1. +9. Issue `make -j install` to install ALP into the install directory configured + during step 1. -10. (*Optional*) Issue `source /bin/setenv` to make available the -`grbcxx` and `grbrun` compiler wrapper and runner. +10. (*Optional*) Issue `source /bin/setenv` to make + available the `grbcxx` and `grbrun` compiler wrapper and runner. -Congratulations, you are now ready for developing and integrating ALP/GraphBLAS +Congratulations, you are now ready for developing and integrating ALP algorithms! Any feedback, question, problem reports are most welcome at
@@ -161,10 +188,12 @@ algorithms! Any feedback, question, problem reports are most welcome at # Additional Contents -The remainder of this file summarises other build system targets, how to -integrate ALP algorithms into applications, debugging, development, and, -finally, acknowledges contributors and lists technical papers. +The remainder of this file summarises configuration options, additional build +system targets, how to integrate ALP programs into applications, debugging, and +how to contribute to ALP development. Finally, this README acknowledges contributors +and lists technical papers. +- [Configuration](#configuration) - [Overview of the main Makefile targets](#overview-of-the-main-makefile-targets) - [Automated performance testing](#automated-performance-testing) - [Integrating ALP with applications](#integrating-alp-with-applications) @@ -181,7 +210,97 @@ finally, acknowledges contributors and lists technical papers. - [Debugging](#debugging) - [Development in ALP](#development-in-alp) - [Acknowledgements](#acknowledgements) -- [Citing ALP and ALP/GraphBLAS](#citing-alp-and-alpgraphblas) +- [Citing ALP, ALP/GraphBLAS, and ALP/Pregel](#citing-alp-alpgraphblas-and-alppregel) + + +# Configuration + +ALP employs configuration headers that contain `constexpr` settings that take +effect every time ALP programs are compiled. Multiple object files that were +compiled using ALP must all have been compiled using the same configuration +settings; linking objects that have been compiled with a mixture of +configurations is likely to incur undefined behaviour. The recommendation is +to set a configuration before building and installing ALP, and to keep the +installation directories read-only so that configurations remain static. + +There exists one main configuration file that affects all ALP backends, while +other configuration files only affect a specific backend or only affect specific +classes of backends. 
The main configuration file is found in +`/include/graphblas/base/config.hpp`, which allows one to set the + +1. cache line size, in bytes, within the `CACHE_LINE_SIZE` class; +2. SIMD width, in bytes, within the `SIMD_SIZE` class; +3. default number of experiment repetitions during benchmarking, within the + `BENCHMARKING` class; +4. L1 data cache size, in bytes, within `MEMORY::big_memory` class; +5. from which size onwards memory allocations will be reported, in log-2 + bytes, within `MEMORY::big_memory`; +6. index type used for row coordinates, as the `RowIndexType` typedef; +7. index type used for column coordinates, as the `ColIndexType` typedef; +8. type used for indexing nonzeroes, as the `NonzeroIndexType` typedef; +9. index type used for vector coordinates, as the `VectorIndexType` typedef. + +Other configuration values in this file are automatically inferred, are fixed +non-configurable settings, or are presently not used by any ALP backend. + +## Reference and reference_omp backends + +The file `include/graphblas/reference/config.hpp` contains defaults that pertain +to the auto-vectorising and sequential `reference` backend, but also to the +shared-memory auto-parallelising `reference_omp` backend. It allows one to set + +1. whether prefetching is enabled in `PREFETCHING::enabled`; +2. the prefetch distance in `PREFETCHING::distance`; +3. the default memory allocation strategy for thread-local data in + `IMPLEMENTATION::defaultAllocMode()`; +4. same, but for shared data amongst threads in + `IMPLEMENTATION::sharedAllocMode()`; + +Modifying any of the above should be done with utmost care as it typically +affects the defaults across an ALP installation, and *all* programs compiled +using it. Configuration elements not mentioned here should not be touched by +users, and rather should concern ALP developers only. 
+ +## OpenMP backends + +The file `include/graphblas/omp/config.hpp` contains some basic configuration +parameters that affect any OpenMP-based backend. However, the configuration +file does not contain any other user-modifiable settings, but rather contains +a) some utilities that OpenMP-based backends may rely on, and b) defaults +that are derived from other settings described in the above. These settings +should only be overridden with compelling and expert knowledge. + +## LPF backends + +The file `include/graphblas/bsp/config.hpp` contains some basic configuration +parameters that affect any LPF-based backend. It includes: + +1. an initial maximum of LPF memory slot registrations in `LPF::regs()`; +2. an initial maximum of LPF messages in `LPF::maxh()`. + +These defaults, if insufficient, will be automatically resized during execution. +Setting these large enough will therefore chiefly prevent buffer resizes at run- +time. Modifying these should normally not lead to significant performance +differences. + +## Utilities + +The file `include/graphblas/utils/config.hpp` details configurations of various +utility functions, including: + +1. a buffer size used during reading input files, in `PARSER::bsize()`; +2. the block size of individual reads in `PARSER::read_bsize()`. + +These defaults are usually fine except when reading from SSDs, which would +benefit from a larger `read_bsize`. + +## Others + +While there are various other configuration files (find `config.hpp`), the above +should list all user-modifiable configuration settings of interest. The +remainder pertains to configurations that are automatically deduced from the +aforementioned settings, or pertains to settings that describe how to safely +compose backends and thus only are of interest to ALP developers. 
# Overview of the main Makefile targets @@ -190,7 +309,8 @@ The following table lists the main build targets of interest: | Target | Explanation | |----------------------:|---------------------------------------------------| -| \[*default*\] | builds the ALP/GraphBLAS libraries and examples | +| \[*default*\] | builds the ALP libraries and examples, including | +| | Sparse BLAS libraries generated by ALP | | `install` | install libraries, headers and some convenience | | | scripts into the path set via `--prefix=` | | `unittests` | builds and runs all available unit tests | @@ -198,7 +318,12 @@ The following table lists the main build targets of interest: | `perftests` | builds and runs all available performance tests | | `tests` | builds and runs all available unit, smoke, and | | | performance tests | -| `docs` | builds HTML and LaTeX code and API documentation | +| `userdocs` | builds HTML and LaTeX documentation corresponding | +| | to the public ALP API | +| `devdocs` | builds HTML and LaTeX code documentation for | +| | developers of the ALP internals | +| `docs` | build both the user and developer code | +| | documentation | For more information about the testing harness, please refer to the [related documentation](tests/Tests.md). @@ -209,21 +334,20 @@ refer to the [the related documentation](docs/Build_and_test_infra.md). # Automated performance testing -To check in-depth performance of this ALP/GraphBLAS implementation, issue -`make -j perftests`. This will run several algorithms in several ALP/GraphBLAS +To check in-depth performance of this ALP implementation, issue +`make -j perftests`. This will run several algorithms in several ALP configurations. This generates three main output files: -1. `/tests/performance/output`, which summarises the - whole run; +1. `/tests/performance/output`, which summarises the whole run; -2. `/tests/performance/output/benchmarks`, which - summarises the performance of individual algorithms; and +2. 
`/tests/performance/output/benchmarks`, which summarises the + performance of individual algorithms; and -3. `/tests/performance/output/scaling`, which - summarises operator scaling results. +3. `/tests/performance/output/scaling`, which summarises operator + scaling results. -To ensure that all tests run, please ensure all related datasets are available -as also described at step 5 of the quick start. +To ensure that all tests run, please ensure that all related datasets are +available, as also described at step 5 of the quick start. With LPF enabled, please note the remark described at steps 3 and 7 of the quick start guide. If LPF was not configured using MPICH, please review and apply any @@ -232,24 +356,28 @@ necessary changes to `tests/performance/performancetests.sh`. # Integrating ALP with applications -There are several use cases in which ALP can be deployed and utilized, listed -in the following. These assume that the user has installed ALP/GraphBLAS in a -dedicated directory via `make install`. +There are several use cases in which ALP can be deployed and utilised, listed +in the following. These assume that the user has installed ALP in a dedicated +directory via `make install`. ## Running ALP programs as standalone executables ### Implementation The `grb::Launcher< AUTOMATIC >` class abstracts a group of user processes that -should collaboratively execute any single ALP/GraphBLAS program. The -ALP/GraphBLAS program of interest must have the following signature: -`void grb_program( const T& input_data, U& output_data )`. +should collaboratively execute any single ALP program. 
The ALP program of +interest must have the following signature: + +``` +void grb_program( const T& input_data, U& output_data ) +``` + The types `T` and `U` can be any plain-old-data (POD) type, including structs -- these can be used to broadcast input data from the master process to all user processes (`input_data`) -- and for data to be sent back on exit of the parallel -ALP/GraphBLAS program. +ALP program. -The above sending-and-receiving across processes applies only to ALP/GraphBLAS +The above sending-and-receiving across processes applies only to ALP implementations and backends that support or require multiple user processes; both the sequential `reference` and the shared-memory parallel `reference_omp` backends, for example, support only one user process. @@ -258,11 +386,11 @@ In case of multiple user processes, the overhead of the broadcasting of input data is linear in the number of user processes, as well as linear in the byte- size of `T` which hence should be kept to a minimum. A recommended use of this mechanism is, e.g., to broadcast input data locations; any additional I/O -should use the parallel I/O mechanisms that ALP/GraphBLAS exposes to the ALP -program itself. +should use the parallel I/O mechanisms that ALP exposes to the ALP program +itself. Output data is retrieved only from the user process with ID `0`, even if -multiple user processes exist. Some implemenations or systems may require +multiple user processes exist. Some implementations or systems may require sending back the output data to a calling process, even if there is only one user process. The data movement cost incurred should hence be considered linear in the byte size of `U`, and, similar to the input data broadcasting, @@ -287,60 +415,67 @@ your programs using the ALP installation, the following flags are recommended: Omitting these flags for brevity, some compilation examples follow. 
-When using the LPF-enabled hybrid shared- and distributed-memory backend of -ALP/GraphBLAS, simply use +When using the LPF-enabled hybrid shared- and distributed-memory ALP backends, ```bash grbcxx -b hybrid ``` -as the compiler command. To show all flags that the wrapper passes on, please use + +as the compiler command. To show all flags that the wrapper passes on, please +use ```bash grbcxx -b hybrid --show ``` + and append your regular compilation arguments. -The `hybrid` backend is capable of spawning multiple ALP/GraphBLAS user -processes. In contrast, compilation using +The `hybrid` backend is capable of spawning multiple ALP user processes. In +contrast, compilation using ```bash grbcxx -b reference ``` + produces a sequential binary, while ```bash grbcxx -b reference_omp ``` + produces a shared-memory parallel binary. -Note that the ALP/GraphBLAS source code never requires change while switching -backends. +Note that the ALP source code never requires change while switching backends. ### Linking -The executable must be statically linked against an ALP/GraphBLAS library that -is different depending on the selected backend. +The executable must be statically linked against an ALP library that is +different depending on the selected backend. The compiler wrapper `grbcxx` takes care of all link-time dependencies automatically. -When using the LPF-enabled BSP1D backend to ALP/GraphBLAS, for example, simply -use `grbcxx -b bsp1d` as the compiler/linker command. +When using the LPF-enabled BSP1D backend to ALP, for example, simply use +`grbcxx -b bsp1d` as the compiler/linker command. + Use ```bash grbcxx -b bsp1d --show ``` + to show all flags that the wrapper passes on. ### Running The resulting program has run-time dependencies that are taken care of by the -LPF runner `lpfrun` or by the ALP/GraphBLAS runner `grbrun`. +LPF runner `lpfrun` or by the ALP runner `grbrun`. + We recommend using the latter: ```bash grbrun -b hybrid -np

``` -Here, `P` is the number of requested ALP/GraphBLAS user processes. + +Here, `P` is the number of requested ALP user processes. ### Threading @@ -350,18 +485,18 @@ on a single node, the `reference_omp` backend may be selected instead. In both cases, make sure that during execution the `OMP_NUM_THREADS` and `OMP_PROC_BIND` environment variables are set appropriately on each node that -executes ALP/GraphBLAS user process(es). +executes ALP user process(es). ## Running parallel ALP programs from existing parallel contexts This, instead of automatically spawning a requested number of user processes, assumes a number of processes already exist and that we wish those processes to -jointly execute a single parallel ALP/GraphBLAS program. +jointly execute a single parallel ALP program. ### Implementation -The binary that contains the ALP/GraphBLAS program to be executed must define -the following global symbol with the given value: +The binary that contains the ALP program to be executed must define the +following global symbol with the given value: ```c++ const int LPF_MPI_AUTO_INITIALIZE = 0 @@ -377,19 +512,19 @@ grb::Launcher< MANUAL > launcher( s, P, hostname, portname ) ``` Here, `P` is the total number of processes that should jointly execute a -parallel ALP/GraphBLAS program, while `0 <= s < P` is a unique ID of this -process amongst its `P`-1 siblings. -The types of `s` and `P` are `size_t`, i.e., unsigned integers. +parallel ALP program, while `0 <= s < P` is a unique ID of this process amongst +its `P`-1 siblings. The types of `s` and `P` are `size_t`, i.e., unsigned +integers. One of these processes must be selected as a connection broker prior to forming -a group of ALP/GraphBLAS user processes. The remainder `P-1` processes must -first connect to the chosen broker using TCP/IP connections. This choice must -be made outside of ALP/GraphBLAS, prior to setting up the launcher, and -materialises as the `hostname` and `portname` Launcher constructor arguments. 
-The host and port name are strings, and must be equal across all processes. +a group of ALP user processes. The remainder `P-1` processes must first connect +to the chosen broker using TCP/IP connections. This choice must be made outside +of ALP, prior to setting up the launcher, and materialises as the `hostname` and +`portname` Launcher constructor arguments. The host and port name are strings, +and must be equal across all processes. As before, and after the successful construction of a manual launcher instance, -a parallel ALP/GraphBLAS program is launched via +a parallel ALP program is launched via ```c++ grb::Launcher< MANUAL >::exec( &grb_program, input, output ) @@ -398,25 +533,24 @@ grb::Launcher< MANUAL >::exec( &grb_program, input, output ) in exactly the same way as described earlier, though with the input and output arguments now being passed in a one-to-one fashion: 1. The input data is passed on from the original process to exactly one - corresponding ALP/GraphBLAS user process; i.e., no broadcast occurs. The - original process and the ALP/GraphBLAS user process are, from an operating - system point of view, the same process. Therefore, and additionally, input - no longer needs to be a plain-old-data (POD) type. Pointers, for example, - are now perfectly valid to pass along, and enable sharing data between the - original process and the ALP/GraphBLAS algorithm. - 2. The output data is passed from each ALP/GraphBLAS user process to the - original process that called `Launcher< MANUAL >::exec`. To share - ALP/GraphBLAS vector data, it is, for example, legal to return a - `grb::PinnedVector< T >` as the `exec` output argument type. Doing so is - akin to returning a pointer to output data, and does not explicitly pack - nor transmit vector data. + corresponding ALP user process; i.e., no broadcast occurs. The original + process and the ALP user process are, from an operating system point of + view, the same process. 
Therefore, and additionally, input no longer needs + to be a plain-old-data (POD) type. Pointers, for example, are now perfectly + valid to pass along, and enable sharing data between the original process + and the ALP algorithm. + 2. The output data is passed from each ALP user process to the original + process that called `Launcher< MANUAL >::exec`. To share ALP vector data, + it is, for example, legal to return a `grb::PinnedVector< T >` as the + `exec` output argument type. Doing so is akin to returning a pointer to + output data, and does not explicitly pack nor transmit vector data. ### Running The pre-existing process must have been started using an external mechanism. This mechanism must include run-time dependence information that is normally -passed by the ALP/GraphBLAS runner whenever a distributed-memory parallel -backend is selected. +passed by the ALP runner whenever a distributed-memory parallel backend is +selected. If the external mechanism by which the original processes are started allows it, this is most easily effected by using the standard `grbcxx` launcher while @@ -444,14 +578,14 @@ to add ALP and ALP/GraphBLAS as a dependence to your project. # Debugging -To debug an ALP/GraphBLAS program, please compile it using the sequential -reference backend and use standard debugging tools such as `valgrind` and `gdb`. +To debug an ALP program, please compile it using the sequential reference +backend and use standard debugging tools such as `valgrind` and `gdb`. Additionally, please ensure to *not* pass the `-DNDEBUG` flag during compilation. If bugs appear in one backend but not another, it is likely you have found a bug -in the former backend implementation. Please send a minimum working example that -demonstrates the bug to the maintainers, either as an issue on or an email to: +in the former backend. Please send a minimum working example that demonstrates +the bug to the maintainers, either as an issue on or an email to: 1. 
[GitHub](https://github.com/Algebraic-Programming/ALP/issues); 2. [Gitee](https://gitee.com/CSL-ALP/graphblas/issues); 3. [Albert-Jan](mailto:albertjan.yzelman@huawei.com). @@ -459,8 +593,8 @@ demonstrates the bug to the maintainers, either as an issue on or an email to: # Development in ALP -Your contributions to ALP/GraphBLAS would be most welcome. Merge or Pull Requests -(MRs/PRs) can be contributed via Gitee and GitHub. See above for the links. +Your contributions to ALP would be most welcome. Merge Requests (MRs) can be +contributed via Gitee and GitHub; see above for the links. For the complete development documentation, you should start from the [docs/README file](docs/README.md) and the related @@ -470,10 +604,10 @@ For the complete development documentation, you should start from the # Acknowledgements The LPF communications layer was primarily authored by Wijnand Suijlen, without -whom the current ALP/GraphBLAS would not be what it is now. +whom the current ALP would not be what it is now. -The collectives library and its interface to the ALP/GraphBLAS was primarily -authored by Jonathan M. Nash. +The collectives library and its interface to the ALP was primarily authored by +Jonathan M. Nash. The testing infrastructure that performs smoke, unit, and performance testing of sequential, shared-memory parallel, and distributed-memory parallel backends was @@ -485,17 +619,30 @@ Computing Systems Laboratory in Zürich in particular. See the [NOTICE](NOTICE) file for individual contributors. -# Citing ALP and ALP/GraphBLAS +# Citing ALP, ALP/GraphBLAS, and ALP/Pregel + +If you use ALP in your work, please consider citing one or more of the following +papers, as appropriate. -If you use ALP/GraphBLAS in your work, please consider citing one or more of the -following papers, as appropriate: +## ALP and ALP/GraphBLAS - [A C++ GraphBLAS: specification, implementation, parallelisation, and evaluation](http://albert-jan.yzelman.net/PDFs/yzelman20.pdf) by A. N. 
Yzelman, D. Di Nardo, J. M. Nash, and W. J. Suijlen (2020). Pre-print. [Bibtex](http://albert-jan.yzelman.net/BIBs/yzelman20.bib). - - [Nonblocking execution in GraphBLAS](http://albert-jan.yzelman.net/PDFs/mastoras22-pp.pdf) - by Aristeidis Mastoras, Sotiris Anagnostidis, and A. N. Yzelman (2022). - Pre-print. + - [Nonblocking execution in GraphBLAS](https://ieeexplore.ieee.org/document/9835271) + by Aristeidis Mastoras, Sotiris Anagnostidis, and A. N. Yzelman + in IEEE International Parallel and Distributed Processing Symposium + Workshops, 2022. [Bibtex](http://albert-jan.yzelman.net/BIBs/mastoras22.bib). + - [Design and implementation for nonblocking execution in GraphBLAS: tradeoffs and performance](https://dl.acm.org/doi/10.1145/3561652) + by Aristeidis Mastoras, Sotiris Anagnostidis, and A. N. Yzelman + in ACM Transactions on Architecture and Code Optimization 20(1), 2023. + [Bibtex](http://albert-jan.yzelman.net/BIBs/mastoras22a.bib). + +## ALP/Pregel + + - [Humble Heroes](http://albert-jan.yzelman.net/PDFs/yzelman22-pp.pdf) + by A. N. Yzelman (2022). Pre-print. + [Bibtex](http://albert-jan.yzelman.net/BIBs/yzelman22.bib). 
diff --git a/bootstrap.sh b/bootstrap.sh index 89b865a15..8acfdfa58 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -63,7 +63,7 @@ validate_command_result() { print_help() { echo "Usage: $0 --prefix= [--with-lpf[=]]\ - [--with-banshee=] [--with-snitch=] [--no-reference] [--debug-build] [--generator=] [--show] [--delete-files]" + [--with-banshee=] [--with-snitch=] [--no-reference] [--no-nonblocking] [--debug-build] [--generator=] [--show] [--delete-files]" echo " " echo "Required arguments:" echo " --prefix=" @@ -74,6 +74,11 @@ the location where LPF is installed" echo " --with-banshee= - path to the the tools to compile the banshee backend" echo " --with-snitch= - path to the tools for Snitch support within the banshee backend" echo " --no-reference - disables the reference and reference_omp backends" + echo " --no-hyperdags - disables the hyperdags backend" + echo " --with-hyperdags-using= - uses the given backend reference for HyperDAG generation" + echo " optional; default value is reference" + echo " clashes with --no-hyperdags" + echo " --no-nonblocking - disables the nonblocking backend" echo " --debug-build - build the project with debug options (tests will run much slower!)" echo " --generator= - set the generator for CMake (otherwise use CMake's default)" echo " --show - show generation commands instead of running them" @@ -90,6 +95,9 @@ the location where LPF is installed" } reference=yes +hyperdags=yes +hyperdags_using=reference +nonblocking=yes banshee=no lpf=no show=no @@ -146,6 +154,16 @@ or assume default paths (--with-lpf)" --no-reference) reference=no ;; + --no-hyperdags) + hyperdags=no + ;; + --with-hyperdags-using=*) + hyperdags=yes + hyperdags_using="${arg#--with-hyperdags-using=}" + ;; + --no-nonblocking) + nonblocking=no + ;; --debug-build) debug_build=yes ;; @@ -202,6 +220,19 @@ if [[ "${reference}" == "yes" || "${lpf}" == "yes" ]]; then check_cc_cpp_comp fi +if [[ "${hyperdags}" == "yes" ]]; then + if [[ "${hyperdags_using}" != "reference" 
]]; then + printf "Hyperdags backend requested using the ${hyperdags_using} backend, " + printf "but only the reference backend is supported currently." + exit 255 + fi + if [[ "${hyperdags_using}" == "reference" && "${reference}" == "no" ]]; then + printf "Hyperdags backend is selected using the reference backend, " + printf "but the reference backend was not selected." + exit 255 + fi +fi + if [[ "${lpf}" == "yes" ]]; then if [[ -z "${LPF_INSTALL_PATH}" ]]; then check_lpf @@ -228,7 +259,7 @@ CURRENT_DIR="$(pwd)" SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" # CONFIGURE CMAKE BUILDING INFRASTRUCTURE -if [[ "${reference}" == "yes" || "${lpf}" == "yes" ]]; then +if [[ "${reference}" == "yes" || "${lpf}" == "yes" || "${nonblocking}" == "yes" ]]; then BUILD_DIR="${CURRENT_DIR}" printf "Checking for cmake..." @@ -287,6 +318,15 @@ the current directory before invocation or confirm the deletion of its content w if [[ "${reference}" == "no" ]]; then CMAKE_OPTS+=" -DWITH_REFERENCE_BACKEND=OFF -DWITH_OMP_BACKEND=OFF" fi + if [[ "${hyperdags}" == "no" ]]; then + CMAKE_OPTS+=" -DWITH_HYPERDAGS_BACKEND=OFF" + fi + if [[ "${hyperdags}" == "yes" ]]; then + CMAKE_OPTS+=" -DWITH_HYPERDAGS_USING=${hyperdags_using}" + fi + if [[ "${nonblocking}" == "no" ]]; then + CMAKE_OPTS+=" -DWITH_NONBLOCKING_BACKEND=OFF" + fi if [[ "${lpf}" == "yes" ]]; then CMAKE_OPTS+=" -DLPF_INSTALL_PATH='${ABSOLUTE_LPF_INSTALL_PATH}'" fi diff --git a/changelog.md b/changelog.md index 72aa3e4d6..3a77e6b5e 100644 --- a/changelog.md +++ b/changelog.md @@ -1,4 +1,128 @@ +Version 0.7.0 +============= + +This is a summary of changes. For full details, see the publicly available Git +history prior to the v0.7 tag. + +Highlights: + + 1. This release re-implements the nonblocking ALP/GraphBLAS backend by Mastoras + et al. (GrAPL/IPDPSW '22, TACO '23) on the latest ALP code base. 
The use of + the nonblocking backend for some algorithms results in multiple-factor + speedups versus standard blocking execution as well as versus external + industry-standard frameworks. This includes Eigen, which, like nonblocking + ALP/GraphBLAS, performs cross-operation fusion. Simply compile your ALP + programs using `grbcxx -b nonblocking`, and enjoy the speedups! + + 2. We also introduce a new programming interface to the ALP software stack that + allows vertex-centric programming in addition to programming using + generalised sparse linear algebra. This new interface, ALP/Pregel, + translates vertex-centric programs to standard ALP/GraphBLAS primitives + during compilation, and thus benefits from all automatic optimisations + included with the ALP software stack. + + 3. Support for software prefetching during `vxm` and `mxv` has been added to + the `reference` and `reference_omp` backends. Since optimal prefetch + settings and their overall effectiveness rely strongly on 1) the structure + of the sparse matrices and graphs considered as well as on 2) the algorithms + used on those data, this new feature is turned off by default. To use it, + please enable it via `include/graphblas/reference/config.hpp` and tune the + there-defined prefetch distances. + + 4. Finally, this release includes another new backend, the `hyperdags` backend. + A program compiled with this backend will, after execution, dump a HyperDAG + representation of the ALP computations that the program executed. + +Changes to the specification: + + 1. Any ALP primitive with ALP container output now takes a Phase argument. + + 2. Clarify that the use of the `dense` descriptor also implies that the output + containers on entry must be dense. This applies also for out-of-place + primitives.
+ +Algorithms: + - [new] a vertex-centric PageRank-like algorithm implemented on top of the new + ALP/Pregel has been added; + - [new] a vertex-centric algorithm for strongly connected components on + undirected graphs implemented on top of ALP/Pregel has been added; + - [new] the algebraic k-core decomposition algorithm by Li et al. (HPEC '21) + has been added; + - [bug] the mpv algorithm performed one too many iterations, while all + associated tests used an ALP/GraphBLAS baseline-- v0.7 now instead verifies + against external ground truths; + - [bug] the label propagation algorithm relied on a bugged implementation of + `grb::set`, now fixed, while it now, when possible, relies on `std::swap` + instead of performing explicit and expensive copies; + - [bug] the CG algorithm returned `SUCCESS` even if it failed to converge within + the given number of maximum iterations. + +Operators: + - [new] v0.7 (re-)introduces the four less-than(-or-equal) and + greater-than(-or-equal) operators; + +All backends: + - [bug] fixed the behaviour of ALP containers under copy-assignment and + copy-construction; + - [bug] all variants of `foldl` and `foldr` previously could erroneously return + `ILLEGAL` in the presence of sparse vectors and/or masks; + - [bug] several primitives would not return `ILLEGAL` in the presence of the + `dense` descriptor when faced with sparse containers; + - [bug] all backends missed the implementation of at least one `eWiseMul` + variant; + - [bug] all backends missed the implementation of at least two `eWiseApply` + variants where both inputs are scalar; + - [feature] improved `_DEBUG` tracing and code style throughout.
+ +Reference and reference_omp backends: + - [bug] overlap detection of the output and output mask was erroneously + disabled for the `vxm` and `mxv` primitives, herewith fixed; + - [bug] `foldl` and `foldr` previously have employed unexpected casting + behaviour; + - [bug] multiple copy-assignment of the same vector could fail; + - [bug] the vector<-scalar<-vector `eWiseApply` using operators was in-place; + - [bug] the `eWiseApply` using sparse vector inputs and/or masks could in some + rare cases depending on structure and vector lengths generate incorrect + output; + - [bug] the implementation of the vector `grb::set` where the output container + was not already dense was in-place, while out-of-place semantics were + defined; + - [bug] the output-masked `eWiseMul` was bugged in the case where one of the + inputs was scalar; + - [bug] matrix containers with initial requested capacity zero could attempt + to access uninitialised memory, including even after a successful subsequent + `resize`; + - [performance] `foldl` and `foldr` using sparse vectors and/or masks were + previously not always following asymptotically optimal behaviour; + - [performance] `set` previously did not exploit information such as whether + the `dense` descriptor was present, whether vectors need only touch + coordinate data to generate correct output, or whether it never needs to + touch coordinate data; + - [performance] `eWiseApply` detects more cases of trivial operations on empty + vectors, and completes those faster; + - [performance] optimised `eWiseMul` with scalar inputs. + +BSP1D and hybrid backends: + - [bug] the output-masked `vxm` and various `foldl` and `foldr` were missing; + - [bug] copy-assignment operator for vectors was missing. 
+ +Testing, development, and documentation: + - the unit test suite has been hardened to detect all aforementioned bugs; + - outdated documentation was revised-- in particular, all user-facing + documentation has been checked and can now be generated via the new make + target `make userdocs`; + - developer documentation is now built via `make devdocs`, while the older + `make docs` target now builds both the user and developer documentation; + - new developers can now enjoy an updated developer guide; + - the test suite now prints an error when the automatic detection of the number + of sockets fails, and then auto-selects one instead of zero (which caused the + test scripts to fail); + - added performance tests for the sparse matrix--vector, sparse matrix--sparse + vector, and sparse matrix--sparse matrix multiplication kernels; + - improved both the GitHub and internal CI scripts. + + Version 0.6.0 ============= @@ -8,7 +132,7 @@ history prior to the v0.6 tag. Highlights and changes to the specification: - Deprecated `grb::init` and `grb::finalize` in favour of grb::Launcher. Existing code should migrate to using the Launcher as any later release may - remove the now-deprecated primtives. + remove the now-deprecated primitives. - If you wish to rely on ALP/GraphBLAS for more standard sparse linear algebra but if you cannot, or do not wish to, adapt your existing sources to the C++ ALP/GraphBLAS API, then v0.6 onwards generates libraries that @@ -70,7 +194,7 @@ Reference and reference_omp backends: properly updated. - Bugfix: the OpenMP `schedule( static, chunk_size )` has a dynamic (run-time) component that was not intended. - - Bugifx: some OpenMP `schedule( dynamic, chunk_size )` operate on regular + - Bugfix: some OpenMP `schedule( dynamic, chunk_size )` operate on regular loops and should employ a static schedule instead. BSP1D backend: @@ -198,7 +322,7 @@ BSP1D and hybrid backends: declared as part of BSP1D friend declarations. 
Curiously, many compilers accepted the previous erroneous code. - Bugfix: empty BSP1D containers could previously leave process-local matrices - unitialised. + uninitialised. Reference and reference_omp backends: - Bugfix: matrix construction did not use the `alloc.hpp` mechanisms. This @@ -207,7 +331,7 @@ Reference and reference_omp backends: All backends: - Bugfix: `grb::Launcher` (as well as the benchmarker) did not always properly - finalize the ALP/GraphBLAS context after exec completed. This caused some + finalise the ALP/GraphBLAS context after exec completed. This caused some memory to not be properly freed on program exits. - Bugfix: the out-of-place versions of `grb::operators::{argmin,argmax}` were incorrect. All code within the repository was unaffected by this bug. The @@ -224,7 +348,7 @@ Version 0.4.1 - The CG algorithm assumed out-of-place behaviour of grb::dot, while the specification since v0.1 defines it to be in-place. Implementations of grb::dot were erroneously out-of-place until v0.4, but the CG algorithm - was errouneously not updated. This hotfix rectifies this. + was erroneously not updated. This hotfix rectifies this. Version 0.4.0 @@ -276,36 +400,46 @@ Version 0.3.0 ============= Reference and reference_omp backends: - - Fixed issue where grb::set, grb::vxm, and grb::mxv could fail for more exotic data types. - - Fixed issue that prevented std::move on matrices, both from assignment and construction. + - Fixed issue where grb::set, grb::vxm, and grb::mxv could fail for more + exotic data types. + - Fixed issue that prevented std::move on matrices, both from assignment and + construction. - Optimised masked grb::set to now reach optimal complexity in all cases. - Optimised grb::eWiseLambda over matrices to avoid atomics. BSP1D backend: - - Fixed issue where iterating over empty matrices could fail in the BSP1D backend. - - Fixed issue in BSP1D backend that caused dynamic allocations where they were not allowed. 
- - Fixed issue where the automatic-mode launcher and benchmarker could, in rare cases, fail. + - Fixed issue where iterating over empty matrices could fail in the BSP1D + backend. + - Fixed issue in BSP1D backend that caused dynamic allocations where they were + not allowed. + - Fixed issue where the automatic-mode launcher and benchmarker could, in rare + cases, fail. - Fixed issue where, under rare conditions, the stack-based combine could fail. - - Fixed performance bug in the BSP1D backend causing spurious calls to lpf_sync. + - Fixed performance bug in the BSP1D backend causing spurious calls to + lpf_sync. Level-3 functionality, all backends: - Fixed issue where a masked set-to-value on matrices would fail. - - Fixed issue where mxm could work with unitialised values when more exotic semirings are used. - - Fixed issue that prevented std::move on matrices, both from assignment and construction. + - Fixed issue where mxm could work with uninitialised values when more exotic + semirings are used. + - Fixed issue that prevented std::move on matrices, both from assignment and + construction. - New level-3 function: eWiseApply. (Note that the interface of level-3 functionality remains experimental.) Algorithms and utilities: - - Fixed issue where MatrixFileReader would store unitialised values when reading pattern matrices. + - Fixed issue where MatrixFileReader would store uninitialised values when + reading pattern matrices. - Updated the sparse neural network inference algorithm. - New algorithm added: spy. Others: - Fixed issue where a `make clean` would miss some object files. - - Added new unit and performance tests, including those for detecting the above-described bug - fixes and added functionality. - - Documentation update in line with the upcoming revision of the C++ GraphBLAS paper. + - Added new unit and performance tests, including those for detecting the + above-described bug fixes and added functionality. 
+ - Documentation update in line with the upcoming revision of the C++ GraphBLAS + paper. - Added some missing documentation. - Code style fixes and some dead code removal. @@ -313,7 +447,8 @@ Others: Version 0.2.0 ============= -Fix some issues in the Banshee backend that appeared after refactoring for the 0.1.0 release. +Fix some issues in the Banshee backend that appeared after refactoring for the +0.1.0 release. Removes --deps option from ./configure as it was no longer used. diff --git a/cmake/AddGRBInstall.cmake b/cmake/AddGRBInstall.cmake index f4b254b8f..94bd58f31 100644 --- a/cmake/AddGRBInstall.cmake +++ b/cmake/AddGRBInstall.cmake @@ -18,8 +18,8 @@ # defines variables for the creation of wrapper scripts and the installation # -assert_defined_variables( WITH_REFERENCE_BACKEND WITH_OMP_BACKEND WITH_BSP1D_BACKEND - WITH_HYBRID_BACKEND WITH_NUMA +assert_defined_variables( WITH_REFERENCE_BACKEND WITH_OMP_BACKEND WITH_NONBLOCKING_BACKEND + WITH_BSP1D_BACKEND WITH_HYBRID_BACKEND WITH_NUMA ) assert_valid_variables( CMAKE_INSTALL_PREFIX AVAILABLE_BACKENDS CMAKE_CXX_COMPILER ) @@ -44,6 +44,7 @@ install( EXPORT GraphBLASTargets # paths where to install the binaries of the various backends set( ALP_UTILS_INSTALL_DIR "${BINARY_LIBRARIES_INSTALL_DIR}" ) set( SHMEM_BACKEND_INSTALL_DIR "${BINARY_LIBRARIES_INSTALL_DIR}/sequential" ) +set( HYPERDAGS_BACKEND_INSTALL_DIR "${BINARY_LIBRARIES_INSTALL_DIR}/hyperdags" ) set( BSP1D_BACKEND_INSTALL_DIR "${BINARY_LIBRARIES_INSTALL_DIR}/spmd" ) set( HYBRID_BACKEND_INSTALL_DIR "${BINARY_LIBRARIES_INSTALL_DIR}/hybrid" ) @@ -112,7 +113,7 @@ endif() # paths may have spaces, hence wrap them inside single quotes '' # shared memory backends -if ( WITH_REFERENCE_BACKEND ) +if( WITH_REFERENCE_BACKEND ) addBackendWrapperGenOptions( "reference" COMPILE_DEFINITIONS "${REFERENCE_SELECTION_DEFS}" LINK_FLAGS "'${SHMEM_BACKEND_INSTALL_DIR}/lib${BACKEND_LIBRARY_OUTPUT_NAME}.a'" @@ -128,6 +129,23 @@ if( WITH_OMP_BACKEND ) ) endif() +# dependent 
backends +if( WITH_HYPERDAGS_BACKEND ) + addBackendWrapperGenOptions( "hyperdags" + COMPILE_DEFINITIONS "${HYPERDAGS_SELECTION_DEFS};${HYPERDAGS_INCLUDE_DEFS}" + LINK_FLAGS "'${HYPERDAGS_BACKEND_INSTALL_DIR}/lib${BACKEND_LIBRARY_OUTPUT_NAME}.a'" + "'${ALP_UTILS_INSTALL_DIR}/lib${ALP_UTILS_LIBRARY_OUTPUT_NAME}.a'" "${NUMA_LFLAG}" + ) +endif() + +if( WITH_NONBLOCKING_BACKEND ) + addBackendWrapperGenOptions( "nonblocking" + COMPILE_DEFINITIONS "${NONBLOCKING_SELECTION_DEFS};${NONBLOCKING_INCLUDE_DEFS}" + LINK_FLAGS "'${SHMEM_BACKEND_INSTALL_DIR}/lib${BACKEND_LIBRARY_OUTPUT_NAME}.a'" + "'${ALP_UTILS_INSTALL_DIR}/lib${ALP_UTILS_LIBRARY_OUTPUT_NAME}.a'" "${NUMA_LFLAG}" + ) +endif() + # distributed memory backends if( WITH_BSP1D_BACKEND OR WITH_HYBRID_BACKEND ) assert_valid_variables( LPFRUN LPFCPP ) diff --git a/cmake/AddGRBTests.cmake b/cmake/AddGRBTests.cmake index d05be44c8..cec04eb68 100644 --- a/cmake/AddGRBTests.cmake +++ b/cmake/AddGRBTests.cmake @@ -31,9 +31,6 @@ assert_valid_variables( ALL_BACKENDS AVAILABLE_BACKENDS TEST_CATEGORIES # create variables to store tests against each backend foreach( b ${AVAILABLE_BACKENDS} ) - if( NOT TARGET "backend_${b}" ) - message( FATAL_ERROR "Needed target backend_${b} does not exist!" 
) - endif() define_property( GLOBAL PROPERTY tests_backend_${b} BRIEF_DOCS "${b} tests" FULL_DOCS "tests for backend ${b}" ) endforeach() diff --git a/cmake/AddGRBVars.cmake b/cmake/AddGRBVars.cmake index 2b1bc012b..fab0f9ac9 100644 --- a/cmake/AddGRBVars.cmake +++ b/cmake/AddGRBVars.cmake @@ -21,8 +21,8 @@ # to add a new backend, add your own to each ### SECTION # -assert_defined_variables( WITH_REFERENCE_BACKEND WITH_OMP_BACKEND WITH_BSP1D_BACKEND - WITH_HYBRID_BACKEND WITH_NUMA +assert_defined_variables( WITH_REFERENCE_BACKEND WITH_OMP_BACKEND WITH_NONBLOCKING_BACKEND + WITH_BSP1D_BACKEND WITH_HYBRID_BACKEND ) ### STANDARD TARGET NAMES @@ -31,18 +31,26 @@ set( REFERENCE_BACKEND_DEFAULT_NAME "backend_reference" ) set( REFERENCE_OMP_BACKEND_DEFAULT_NAME "backend_reference_omp" ) set( BSP1D_BACKEND_DEFAULT_NAME "backend_bsp1d" ) set( HYBRID_BACKEND_DEFAULT_NAME "backend_hybrid" ) - +set( HYPERDAGS_BACKEND_DEFAULT_NAME "backend_hyperdags" ) +set( NONBLOCKING_BACKEND_DEFAULT_NAME "backend_nonblocking" ) ### COMPILER DEFINITIONS FOR HEADERS INCLUSION AND FOR BACKEND SELECTION # compiler definitions to include backend headers set( REFERENCE_INCLUDE_DEFS "_GRB_WITH_REFERENCE" ) set( REFERENCE_OMP_INCLUDE_DEFS "_GRB_WITH_OMP" ) +set( HYPERDAGS_INCLUDE_DEFS "_GRB_WITH_HYPERDAGS" ) +set( NONBLOCKING_INCLUDE_DEFS "_GRB_WITH_NONBLOCKING" ) set( LPF_INCLUDE_DEFS "_GRB_WITH_LPF" ) # compiler definitions to select a backend set( REFERENCE_SELECTION_DEFS "_GRB_BACKEND=reference" ) set( REFERENCE_OMP_SELECTION_DEFS "_GRB_BACKEND=reference_omp" ) +set( HYPERDAGS_SELECTION_DEFS + "_GRB_BACKEND=hyperdags" + "_GRB_WITH_HYPERDAGS_USING=${WITH_HYPERDAGS_USING}" +) +set( NONBLOCKING_SELECTION_DEFS "_GRB_BACKEND=nonblocking" ) set( BSP1D_SELECTION_DEFS "_GRB_BACKEND=BSP1D" "_GRB_BSP1D_BACKEND=reference" @@ -56,8 +64,7 @@ set( HYBRID_SELECTION_DEFS set( NO_NUMA_DEF "_GRB_NO_LIBNUMA" ) ### **ALL** BACKENDS, EVEN IF NOT ENABLED BY USER -set( ALL_BACKENDS "reference" "reference_omp" "bsp1d" 
"hybrid" ) - +set( ALL_BACKENDS "reference" "reference_omp" "hyperdags" "nonblocking" "bsp1d" "hybrid" ) # list of user-enabled backends, for tests and wrapper scripts (do not change!) set( AVAILABLE_BACKENDS "" ) @@ -66,7 +73,7 @@ set( AVAILABLE_BACKENDS "" ) # backends that are enabled by the user: append as in the following # shared memory backends -if ( WITH_REFERENCE_BACKEND ) +if( WITH_REFERENCE_BACKEND ) list( APPEND AVAILABLE_BACKENDS "reference" ) endif() @@ -74,6 +81,15 @@ if( WITH_OMP_BACKEND ) list( APPEND AVAILABLE_BACKENDS "reference_omp" ) endif() +# dependent backends +if( WITH_HYPERDAGS_BACKEND ) + list( APPEND AVAILABLE_BACKENDS "hyperdags" ) +endif() + +if( WITH_NONBLOCKING_BACKEND ) + list( APPEND AVAILABLE_BACKENDS "nonblocking" ) +endif() + # distributed memory backends if( WITH_BSP1D_BACKEND ) list( APPEND AVAILABLE_BACKENDS "bsp1d" ) diff --git a/docs/Build_and_test_infra.md b/docs/Build_and_test_infra.md index 98b144fc1..e751cb0bd 100644 --- a/docs/Build_and_test_infra.md +++ b/docs/Build_and_test_infra.md @@ -534,7 +534,9 @@ which may be set via a variable like set( EXAMPLE_BACKEND_INSTALL_DIR "${BINARY_LIBRARIES_INSTALL_DIR}/example" ) ``` -used in the following steps. +used in the following steps. The same binary file may implement multiple +backends. For example, both the reference and the OMP backend share +the same binary file, i.e., the one generated for shared memory backends. For convenience, the macro `addBackendWrapperGenOptions` is provided to automatically generate the necessary variables according to the internal naming diff --git a/docs/Development.md b/docs/Development.md index 5bdb5af28..cfe72d5a1 100644 --- a/docs/Development.md +++ b/docs/Development.md @@ -15,30 +15,221 @@ See the License for the specific language governing permissions and limitations under the License. -# Development of ALP/GraphBLAS -This document introduces the reader to the development of ALP/GraphBLAS. 
+# ALP Development Style Guide -ALP/GraphBLAS is written in C++11 and is mainly composed of header files with -largely templated data structures and operations. This allows both +This document introduces the reader to the development style of ALP. + +ALP is written in C++11 and is mainly composed of header files with largely +templated data structures and operations. This allows both 1. strict compile-time checking of the data types and of the algebraic abstractions (typically encoded as template parameters: see the -[Semiring class](include/graphblas/semiring.hpp) for an example) -2. specialized code generation, increasing performance - -## Code style tools and guidelines -ALP/GraphBLAS follows certain code style rules in order to ensure readability -and uniformity. - -To apply these rules, the directory `tools` contains the script -`clang-format-linter.sh` to format (*lint*, in Unix jargon) the code -accordingly, based on the `clang-format` tool. -Version 11 or higher is requested for the settings to be applied; if you want to -use a different version, you can alias it in Bash before invoking -`tools/clang-format-linter.sh`, which directly calls the command +[Semiring class](../include/graphblas/semiring.hpp) for an example); + +2. specialised code generation, increasing performance. + +Common patterns include [SFINAE](https://de.wikipedia.org/wiki/Substitution_failure_is_not_an_error) +and in particular its combination with (algebraic) type traits, as well as +copious use of `static_assert` and `constexpr`. The choice of ANSI C++11 is to +balance the benefits of these more modern C++ constructs with the typical +reluctance of applying the latest and greatest in software development tooling +within production codes. + +Given that this is a template library, there are both rigid code styles as well +as more rigid coding patterns to ensure the overall quality of the template +library-- these are detailed in their respective sections. 
This document also +includes a brief description of code style tools included with the repository, +as well as a section on the use of the available build and test infrastructure. + +First, however, this section concludes with some brief comments on the overall +code structure. + +## Encapsulation + +Template code that should not be exposed to ALP programmers (i.e., users of the +ALP programming interface) should be encapsulated in an internal namespace such +as, e.g., `grb::internal`. Non-templated code that should not be exposed to ALP +programmers should be defined within `.cpp` files. Only functionality that is +called by templated code should be exported during compilation of the ALP +libraries that ALP programmers would link against. All code that may be used by +ALP programmers should be documented thoroughly. + +## Utilities + +Utility functions that could be useful by ALP programmers and not just by ALP +developers, should unambiguously be housed in the `include/graphblas/utils` +directory, with the interfaces made available through the corresponding +`grb::utils` namespace. These functionalities should therefore and ideally *not* +be included in an internal namespace. + +## Test utilities + +Utility functions that are *only* useful for ALP unit, smoke, and/or performance +tests should unambiguously be housed in the `tests/utils` directory. It should +never be included with code functionalities for ALP programmers. These +functionalities should never be included with the template library, neither as a +header that could be invoked by ALP programmers, nor within an internal +namespace or within an internal `.cpp` file. + + +# Code style guidelines + +ALP follows certain code style rules in order to ensure readability and +uniformity. An informal summary of the main points follows: + +1. alignment uses **spaces** while indentation uses **tabs**; + +2. 
indentation is increased after a line break that does not end with `;`, + increased after a line break with an unterminated `<`, `(` or `{` and + decreased after matching `;`, `>`, `)`, and `}`. Opening and closing + delimiters are the last, resp., first characters on every line-- i.e., the + commonly accepted indentation pattern; + +3. none of `;`, `<`, `(`, `{` should appear alone on a single line-- while if + the opening delimiters like `<` follows a keyword it should do so + immediately, without intermediate spaces; + +4. when a closing delimiter is far (in a vertical space sense) from its opening + pair, it should be followed by a comment that documents what it closes; + +5. keywords that induce indentation include `private:`, `protected:`, and + `public:`, which furthermore do not induce intermediate spaces between the + keyword and the `:`; + +6. indentation of pre-processor code (macros) uses spaces, not tabs, and ignores + tab-based indentation; + +7. a single line has maximum length of about 80 characters, not including + indentation, and never ends with white spaces (space characters or tab + characters); + +8. use spaces and parentheses liberally for increasing code readability and to + limit ambiguity, including for if-else blocks or for-loop blocks that consist + only of one (or an otherwise limited number of lines); + +9. files always end with an empty line, and includes two empty lines before + implementation starts (i.e., two empty lines after any comments, macro + guards, and includes before the first line of code); + +10. Classes and types use the CamelCase naming format, variables of any kind + (static, constexpr, global, or members) use camelCase, while constants of + any kind (static const, global const, constexpr const, etc.) use CAMELCASE. + Names shall furthermore be both self-descriptive and short. Namespaces are + camelcase. + +As the saying goes, exceptions prove the rules. 
For example, rule #3 could be +viewed as a specific exception to rule #8. Exceptions that are not +self-contained in the above set include: + +1. one long program line under rule #7 may be arbitrarily spread over two lines + even if it runs counter rule #3-- but not if it would spread over more than + two lines; + +2. OpenMP pragmas and compiler warning suppressions may ignore rule #6-- they + may follow regular tab-based indentation instead; + +3. the 80-character limit is not strictly enforced. For example, an OpenMP macro + of 83 characters on a single line is better readable than when split over + two; + +4. brackets in code bodies that limit the scope of some of the declaration + within the body, may, contrary to rule #3, appear alone on a single line. + + +## Code style by examples: + +- `if( ... ) {`, not `if (...) {` or any other variant; + +- lines should never end with white space (tab or space characters); + +- `if( x == 5 ) {` instead of `if( x==5 ) {`; + +- only write `<<` or `>>` when doing bit shifts, never for nested templates; + +- the following is correct. It would *not* be correct to put the whole block on + a single line, nor would it be correct to write it without any curly brackets; + +```c++ +if( ... ) { + return SUCCESS; +} +``` + +- the following is correct w.r.t. vertical spacing; + +```c++ +/* + * copyright info + */ + +/** + * @file + * + * File documentation + * + * @author Author information + * @date Date of initial creation + */ + +#ifndef MACRO_GUARD +#define MACRO_GUARD + +// note that two empty lines follow: + + +namespace alp { + + // ... + +} + +#endif + +// note that one empty line follows: + +``` + +- encapsulation using curly bracket delimiters that both appear on a single + line: + +```c++ +void f( ... ) { + // some code block dubbed "A" + // ... + // end code block A + size_t ret; + { + // some code block with ields and containers that are used *solely* for + // for computing ret + // ... 
+ ret = ...; + } + // some code that uses ret as well as fields, containers, and anything else + // that was defined in code block A +} +``` + + +# Code style tools + +There currently exist two tools to help check developer's code styles: the Clang +linter script `clang-format-linter.sh`, and the `detectSuspiciousSpacing.sh` +script. + +## Clang linter + +To automatically and approximately correctly check whether code style rules are +followed properly, the directory `tools` contains the script +`clang-format-linter.sh` that formats (*lints*, in Unix jargon) the source code, +based on the `clang-format` tool. + +Version 11 or higher of the tool is required. If you want to use a different +version, you can alias it in Bash before invoking +`tools/clang-format-linter.sh`, which otherwise directly calls the command `clang-format-11`. -This tools is available in the standard repositories of the main Linux + +This tools is available in the standard repositories of all main Linux distributions: for example, in Ubuntu you can install it with `apt-get install clang-format-11`. 
@@ -47,7 +238,8 @@ To list the script parameters, simply type ```bash tools/clang-format-linter.sh -h ``` -For example, to lint the file `tests/add15d.cpp` and see the lint'ed code on the + +For example, to lint the file `tests/add15d.cpp` and see the linted code on the standard output, type ```bash @@ -66,55 +258,127 @@ Instead, to lint the whole ALP/GraphBLAS code-base in-place, type tools/clang-format-linter.sh -i --lint-whole-grb ``` -The style rules enforced by the tool are - -- [x] lines are max 200 characters long, which means the line size is pretty -liberal to avoid weird re-flows -- [x] indents should be *tabs*, not spaces -- [x] alignment should be done using spaces, not tabs -- [x] essentially any line that ends in `{`, `(`, or whatever increases the -current number of indents by one and vice versa -- [x] argument lists (including template arguments) longer than 80 chars should -be broken over multiple lines -- [x] `if( `, not `if (` (also for `for`, etc.) -- [x] no lines with indents and curly brackets only: put curly brackets on the -same line as what starts that code block instead (only exception: code blocks -that are not started by standard C++ key words, but e.g. required pragmas -instead) -- [x] no lines ending with spaces -- [x] `#ifdef`, `#else`, `#endif` etc are never indented. -- [x] comment blocks are capped at 80 chars per line -- [x] include lines primarily ordered by - 1. standard includes - 2. external libraries - 3. internal headers/files - -The following rules are also mandated, but cannot currently be applied via -`clang-format`; however, developers should abide by the following guidelines as -well: - -* files should end with an empty line -* no `if`, `for`, `while`, or any other control structure without curly -* brackets, even if what follows is a single statement -* OpenMP pragmas (or any pragma) are indented as regular code -* nested `ifdef`s etc. 
in close proximity of one another are indented by spaces - -The following guidelines are not strictly requested nor enforced, but are -suggested to ensure readability and uniformity: - -* be gratuitous with spaces and parenthesis: anything that could possibly be -construed as confusing or ambiguous should be clarified with spaces and -parentheses if that removes (some of the) possible confusion or ambiguity -* in particular, whenever it is legal to put one or more spaces, put one -(e.g., `if( x == 5 )` instead of `if( x==5 )`) -* in particular, only write `<<` or `>>` when doing bit shifts, not when -performing template magic -* when closing a block (either `#endif` or `}`) and the block was long (whatever -long may be), add a comment on what it is that is being closed -* all functions should have `doxygen`-friendly documentation -* minimise the use of pre-processor macros (use C++11 `constexpr` instead) - -## Building and Testing infrastructure +### Warning + +This tool is only approximately correct in terms of the code style described +above(!) + + +## Automated detection of suspicious spacing + +Many code reviews have exposed erroneous use of spaces, primarily due to editors +attempting to be helpful in automatically replicating code styles like +indentations. Before committing code, a careful submitter may opt to execute +something like the following: + +``` +# go into a source directory where you have committed changes +$ cd include/graphblas/nonblocking +# **from within that directory** execute the helper script: +$ ../../../tools/detectSuspiciousSpacing.sh +``` + +If all is OK, the output of the above would print the following to the standard +output stream (which also immediately documents which patterns the script is +tailored to detect): + +``` +Detecting suspicious spacing errors in the current directory, /path/to/source/include/graphblas/nonblocking + spaces, followed by end-of-line... + tabs, followed by end-of-line... + spaces followed by a tab... 
+$ +``` + +Seeing no `grep` output between the noted patterns (or between the last noted +pattern and the prompt) means that no such patterns have been found within any +source file in the current directory, including source files in a subdirectory +to the current path. + + +# Coding patterns for general code quality + +Some major coding rules for maintaining high code quality include: + +1. files always display the copyright and license header, and documents the + initial author information and date of file creation; + +2. limit the use of macros and in particular, never leak macro definitions to + user code; +3. do not use `using` in a way that leaks to user code-- in particular, + never use it in headers; + +4. separate includes by their source -- e.g., a group of STL includes followed + by a group of internal utility header includes, and so on; + +5. code documentation uses [doxygen](https://www.doxygen.nl/) format, and in + particular the [Javadoc](https://www.doxygen.nl/manual/docblocks.html#cppblock) + style; + +6. use `constexpr` fields or functions in favour of any pre-processor macros, + and avoid global constants, especially those that leak to user code; + +7. performance parameters are never hardcoded but instead embedded (and + documented!) into the applicable `config.hpp` file. + + +# Building and Testing infrastructure + +To use the build and test infrastructure, see the [main README](../README.md). To modify it, you should refer to the [dedicated documentation](Build_and_test_infra.md). + + +## Testing before committing + +A careful committer may wish to run smoke or unit tests before committing to the +main repository. Such developers may wish to take note of the script contained +in the tests directory, `tests/summarise.sh`, which may be used to quickly +analyse a test log file: it summarises how many tests have passed, how many have +been skipped, and how many have failed. 
+
+Additionally, if at least one test has failed, or if none of the tests have
+succeeded (indicating perhaps a build error), then the entire log will be
+`cat`-ted.
+
+A common use is to, in one terminal, execute:
+
+```bash
+$ cd build
+$ make -j88 smoketests &> smoketests.log
+```
+
+While in another, and while the above command is running, to execute:
+
+```bash
+$ cd build
+$ watch ../tests/summarise.sh smoketests.log
+```
+
+The second terminal then gives "live" feedback on the progress of the tests.
+
+## Continuous integration
+
+GitHub Actions have been deployed to run smoke tests using both performance and
+debug flags. These tests are run on standard images that do not include the
+datasets that some smoke tests require -- those tests are hence skipped.
+
+An internal CI to the Computing Systems Lab at the Huawei Zurich Research Center
+exists, but can only be triggered by its employees. This CI also performs unit
+tests, in addition to smoke tests. At present, however, it too does *not* employ
+images that have the required dataset embedded or accessible.
+
+The `develop` and `master` branches are tested by the internal CI on a regular
+schedule, in addition to being triggered on every push, and run a more
+comprehensive combination of test suites and compilation (debug/release) flags.
+Also release candidate branches (i.e., branches with names that match the
+wild-card expression `*-rc*`) are subject to the same more extensive test suite.
+
+All CI tests at present skip tests that require data sets, and therefore
+developers are suggested to not skip running local tests manually, at least once
+before flagging a merge request as ready and requesting a review. Even if at
+some point the CI does provide data sets, the practice of developers
+self-checking MRs is recommended as it naturally also induces greater robustness
+across compilers and distributions. 
+ diff --git a/docs/Nonblocking_backend.md b/docs/Nonblocking_backend.md new file mode 100644 index 000000000..f791b36d0 --- /dev/null +++ b/docs/Nonblocking_backend.md @@ -0,0 +1,921 @@ + +

+  Copyright 2021 Huawei Technologies Co., Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+ + +# Design and implementation of the nonblocking backend + +The [C API specification](https://graphblas.org/docs/GraphBLAS_API_C_v1.3.0.pdf) of [GraphBLAS](https://graphblas.org) defines two execution modes: blocking execution and nonblocking execution. In the blocking mode, the invocation of an operation implies that the computation is completed and the result is written to memory when the function returns. The nonblocking execution allows an operation to return although the result has not been computed yet. Therefore, the nonblocking execution may delay the execution of some operations to perform optimisations. Lazy evaluation is the key idea in nonblocking execution, and computations are performed only when they are required for the sound execution of a program. + +For the description of the full design and experimental results for nonblocking execution in ALP/GraphBLAS, please read the following publications. + +* A. Mastoras, S. Anagnostidis, and A. N. Yzelman, "Design and Implementation for Nonblocking Execution in GraphBLAS: Tradeoffs and Performance," ACM Trans. Archit. Code Optim. 20, 1, Article 6 (March 2023), 23 pages, [https://doi.org/10.1145/3561652](https://doi.org/10.1145/3561652) +* A. Mastoras, S. Anagnostidis, and A. N. Yzelman, "Nonblocking execution in GraphBLAS," 2022 IEEE International Parallel and Distributed Processing Symposium Workshops (IPDPSW), 2022, pp. 230-233, doi: [10.1109/IPDPSW55747.2022.00051](10.1109/IPDPSW55747.2022.00051). + +ALP/GraphBLAS provides the `nonblocking` backend that performs multi-threaded nonblocking execution on shared-memory systems. The implementation of the `nonblocking` backend relies on that of the `reference` and `reference_omp` backends that perform sequential and multi-threaded blocking execution, respectively. 
+ + +## Overview of the sources files + +The source files for the `nonblocking` backend are maintained under the `src/graphblas/nonblocking` directory, and the header files are maintained under `include/graphblas/nonblocking`. Most of these files exist for the `reference` backend, and the `nonblocking` backend uses some additional files. In particular, the full list of the source files for the `nonblocking` backend are the following: + +* `analytic_model.cpp` +* `init.cpp` (relies on `reference/init.cpp`) +* `io.cpp` +* `lazy_evaluation.cpp` +* `pipeline.cpp` + +from which the `analytic_model.cpp`, `lazy_evaluation.cpp`, and `pipeline.cpp` exist only for the `nonblocking` backend, and they are the main source files for the implementation of the nonblocking execution. The `init.cpp` file invokes the corresponding functions of the `reference` backend. The header files of the `nonblocking` backend include: + +* `alloc.hpp` (delegates to `reference/alloc.hpp`) +* `analytic_model.hpp` +* `benchmark.hpp` (delegates to `reference/benchmark.hpp`) +* `blas1.hpp` +* `blas2.hpp` +* `blas3.hpp` +* `boolean_dispathcer_blas1.hpp` +* `boolean_dispathcer_blas2.hpp` +* `boolean_dispathcer_io.hpp` +* `collectives.hpp` (delegates to `reference/collectives.hpp`) +* `config.hpp` +* `coordinates.hpp` +* `exec.hpp` (delegates to `reference/exec.hpp`) +* `forward.hpp` +* `init.hpp` +* `io.hpp` +* `lazy_evaluation.hpp` +* `matrix.hpp` +* `pinnedVector.hpp` +* `pipeline.hpp` +* `properties.hpp` +* `spmd.hpp` (delegates to `reference/spmd.hpp`) +* `vector.hpp` (relies on `reference/vector.hpp`) +* `vector_wrapper.hpp` + +from which the `analytic_model.hpp`, `boolean_dispathcer_blas1.hpp`, `boolean_dispathcer_blas2.hpp`, `boolean_dispathcer_io.hpp`, `lazy_evaluation.hpp`, `pipeline.hpp`, and `vector_wrapper.hpp` are used only for the `nonblocking` backend. 
+The current implementation supports nonblocking execution only for level-1 and level-2 operations defined in the following files: + +* `nonblocking/io.hpp` +* `nonblocking/blas1.hpp` +* `nonblocking/blas2.hpp` + +and thus most of the code for the nonblocking execution is found in these three files. The level-3 operations defined in `blas3.hpp` and some defined in `blas2.hpp` incur blocking behaviour. If a program invokes these primitives while compiled using the nonblocking backend, a warning will be emitted to the standard error stream. Please check regularly for future releases that enable native nonblocking execution for these remaining primitives. + + +## Lazy evaluation + +Lazy evaluation enables the loop fusion and loop tiling optimisations in a pure library implementation such as required by ALP/GraphBLAS. Dynamic data dependence analysis identifies operations that share data, and these operations are added as stages of the same pipeline. Operations grouped into the same pipeline may be executed in parallel and reuse data in cache. The design for nonblocking execution is fully dynamic, since the optimisations are performed at run-time and the pipelines may include operations of arbitrary control-flow. The nonblocking execution is fully automatic, since the performance parameters, i.e., the number of threads and the tile size, are selected based on an analytic model (defined in `analytic_model.cpp`). + +To illustrate lazy evaluation for the nonblocking backend, we use the `grb::set` operation that initialises all the elements of the output vector `x` with the value of an input scalar `val`. The code below shows the implementation of `grb::set` for the `reference` and `reference_omp` backends found in `reference/io.hpp`. + +```cpp +template< + Descriptor descr = descriptors::no_operation, + typename DataType, typename T, + typename Coords +> +RC set( + Vector< DataType, reference, Coords > &x, + const T val, + ... +) { + ... 
+ + const size_t n = size( x ); + if( (descr & descriptors::dense) && nnz( x ) < n ) { + return ILLEGAL; + } + + const DataType toCopy = static_cast< DataType >( val ); + + if( !(descr & descriptors::dense) ) { + internal::getCoordinates( x ).assignAll(); + } + DataType * const raw = internal::getRaw( x ); + +#ifdef _H_GRB_REFERENCE_OMP_IO + #pragma omp parallel + { + size_t start, end; + config::OMP::localRange( start, end, 0, n ); +#else + const size_t start = 0; + const size_t end = n; +#endif + for( size_t i = start; i < end; ++ i ) { + raw[ i ] = internal::template ValueOrIndex< descr, DataType, DataType >::getFromScalar( toCopy, i ); + } +#ifdef _H_GRB_REFERENCE_OMP_IO + } +#endif + + assert( internal::getCoordinates( x ).nonzeroes() == + internal::getCoordinates( x ).size() ); + + return SUCCESS; +} +``` + +A typical operation of ALP/GraphBLAS includes a main for loop that iterates over all the elements (or only the nonzeroes) of the containers to perform the required computation. One additional step is to check if the `dense` descriptor is correctly used, i.e., none of the input and output vectors is sparse, and otherwise the error code `grb::ILLEGAL` is returned. It is also necessary to properly assign the coordinates of the output vector. In the case of the `grb::set` operation, the raw data of the output vector are initialised with the value of the input scalar within the body of the main loop. The check for the correct usage of the `dense` descriptor is performed before the main loop, and all the coordinates of the output vector are assigned by invoking `assignAll`. That is, the initialisation of the coordinates is performed in one step, since the output vector will be dense after the completion of this operation. If the `dense` descriptor is given by the user, the vector is supposed to be already dense, and thus the invocation of `assignAll` is omitted. 
+ +To implement lazy evaluation in the ALP/GraphBLAS library implementation, the code of an operation is not necessarily executed when the corresponding function is invoked. Instead, the loop is added into a lambda function that corresponds to a stage of a pipeline, and the lambda function is stored and executed later. Lambda functions are an implementation decision that meshes well with template-based programming in ALP/GraphBLAS. The code below shows the implementation of the `grb::set` operation discussed above for the corresponding nonblocking implementation defined in `nonblocking/io.hpp`. + +```cpp +template< + Descriptor descr = descriptors::no_operation, + typename DataType, typename T, + typename Coords +> +RC set( + Vector< DataType, nonblocking, Coords > &x, const T val, + ... +) { + ... + + RC ret = SUCCESS; + + const DataType toCopy = static_cast< DataType >( val ); + DataType * const raw = internal::getRaw( x ); + const size_t n = internal::getCoordinates( x ).size(); + + constexpr const bool dense_descr = descr & descriptors::dense; + + internal::Pipeline::stage_type func = [&x, toCopy, raw] ( + internal::Pipeline &pipeline, size_t active_chunk_id, size_t max_num_chunks, size_t lower_bound, size_t upper_bound + ) { + (void) active_chunk_id; + (void) max_num_chunks; + + const bool already_dense_vectors = dense_descr || pipeline.allAlreadyDenseVectors(); + + if( !already_dense_vectors ) { + bool already_dense_output = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( x ) ); + if( !already_dense_output ) { + Coords local_x = internal::getCoordinates( x ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + + local_x.local_assignAllNotAlreadyAssigned(); + assert( local_x.nonzeroes() == local_x.size() ); + + internal::getCoordinates( x ).asyncJoinSubset( local_x, active_chunk_id, max_num_chunks ); + } + } + + for( size_t i = lower_bound; i < upper_bound; i++ ) { + raw[ i ] = internal::template ValueOrIndex< descr, 
DataType, DataType >::getFromScalar( toCopy, i ); + } + + return SUCCESS; + }; + + ret = ret ? ret : internal::le.addStage( + std::move( func ), internal::Opcode::IO_SET_SCALAR, + n, sizeof( DataType ), dense_descr, true, + &x, nullptr, + &internal::getCoordinates( x ), nullptr, + nullptr, nullptr, nullptr, nullptr, + nullptr, nullptr, nullptr, nullptr + ); + + return ret; +} +``` + +The implementation of `grb::set` for the `nonblocking` backend is very similar to that of the `reference` and `reference_omp` backends. In particular, a lambda function is defined for the execution of a subset of consecutive iterations of the initial loop determined by the `lower_bound` and `upper_bound` parameters. Therefore, the main loop iterates from `lower_bound` to `upper_bound` to initialise the raw data of the output vector. The main difference between the `nonblocking` backend and the `reference` backend is the way the coordinates are handled. First, it is impossible to check if the `dense` descriptor is correctly given in the beginning of an operation, because the computation may not be completed yet due to lazy evaluation and the number of nonzeroes of a vector may not be up to date. Therefore, the check for the `dense` descriptor must be moved into the lambda function. However, the coordinates used by the `nonblocking` backend require a different mechanism than that used by the `reference` backend. The design of the coordinates mechanism for the `nonblocking` backend is presented in the next section. + + +## Handling sparse vectors + +Vectors in ALP/GraphBLAS may be either sparse or dense. In the case of dense vectors, each operation accesses all the elements as shown above with the example of `grb::set`. However, to efficiently handle sparsity, it is necessary to maintain the coordinates of the nonzeroes, such that ALP/GraphBLAS operations access only the nonzeroes. 
Hence, each vector includes a so-called Sparse Accumulator (SPA), consisting of the following data to handle sparsity:
+
+* an unsigned integer `_cap` that stores the size of the vector;
+* an unsigned integer `_n` that stores the number of nonzeroes in the vector;
+* a boolean array, `_assigned`, of size `_cap` that indicates if the element of a coordinate is a nonzero; and
+* an unsigned integer array, `_stack`, that represents a stack and stores the coordinates of the assigned elements.
+
+A vector is dense when the number of nonzeroes is equal to the size of the vector, i.e., `_n = _cap`.
+The stack and the `_assigned` array are used only when accessing a sparse vector.
+For an empty vector, `_n = 0`, all the elements of `_assigned` are initialised to `false`, and the stack is empty.
+The assignment of the i-th element of a vector implies that:
+```cpp
+_stack[_n] = i;
+_assigned[i] = true;
+_n++;
+```
+Therefore, the coordinates of the nonzeroes are not sorted; they are pushed to the stack in an arbitrary order. Iterating over the nonzeroes of a sparse vector is done via the stack, and thus access to the elements may happen in any order.
+
+The internal representation of a vector is sufficient to correctly and efficiently handle sparse vectors for sequential execution. However, this is not the case for multi-threaded execution, since simultaneous assignments of vector elements may cause data races. Protecting the stack and the counter of nonzeroes with a global lock is a trivial solution that leads to significant performance degradation. Therefore, it is necessary to design a different mechanism that is tailored to the needs of the nonblocking execution and exploits any information about accesses of elements by different threads.
+
+
+## Local coordinates mechanism
+
+The local coordinates mechanism is used for efficient handling of sparse vectors in parallel nonblocking execution and is implemented in `coordinates.hpp`. 
The local coordinates mechanism consists of a set of local views for the coordinates stored in the global stack. Each local view includes the coordinates of the nonzeroes for a tile of iterations, and each thread accesses its own local coordinates, and any update to the sparsity structure of a vector is performed in the local view. The local coordinates mechanism requires initialisation of the local views before the execution of the pipeline and update of the global stack with the new nonzeroes after the execution of the pipeline.
+
+The local coordinates mechanism requires some additional data for each tile of a vector:
+
+* an unsigned integer array that stores the number of nonzeroes for each local view, which are read from the global stack during initialisation;
+* an unsigned integer array that stores the number of nonzeroes that were assigned to each local view during the execution of a pipeline;
+* a set of unsigned integer arrays that represent local stacks and store the local coordinates, i.e., each array corresponds to a different local view.
+
+The local coordinates mechanism relies on five main functions defined in `nonblocking/coordinates.hpp`. The local views are initialised via `asyncSubsetInit`. Each operation reads the state of the local view with `asyncSubset`, and it updates the state with `asyncJoinSubset` once the computation is completed. The invocation of `joinSubset` pushes the local coordinates to the global stack. None of these functions uses locks, and to avoid data races, `joinSubset` updates the global stack based on the prefix-sum computation for the number of new nonzeroes performed by `prefixSumComputation`.
+
+To illustrate the usage of the local coordinates mechanism in the `nonblocking` backend, we use the in-place `grb::foldl` operation shown below, which receives one output vector, one input vector and an operator. 
+ +```cpp +template< + Descriptor descr = descriptors::no_operation, class OP, + typename IOType, typename InputType, typename Coords +> +RC foldl( + Vector< IOType, nonblocking, Coords > &x, + const Vector< InputType, nonblocking, Coords > &y, + const OP &op = OP(), + ... +) { + const size_t n = size( x ); + + ... + + RC ret = SUCCESS; + + constexpr const bool dense_descr = descr & descriptors::dense; + + internal::Pipeline::stage_type func = [&x, &y, &op, phase] ( + internal::Pipeline &pipeline, + const size_t active_chunk_id, const size_t max_num_chunks, + const size_t lower_bound, const size_t upper_bound + ) { + RC rc = SUCCESS; + + const Vector< bool, nonblocking, Coords > * const null_mask = nullptr; + const Coords * const local_null_mask = nullptr; + + Coords local_x, local_y; + const size_t local_n = upper_bound - lower_bound; + size_t local_x_nz, local_y_nz; + bool sparse = false; + + const bool already_dense_vectors = dense_descr || pipeline.allAlreadyDenseVectors(); + + bool already_dense_output = true; + bool already_dense_input = true; + + if( !already_dense_vectors ) { + already_dense_output = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( x ) ); + if( !already_dense_output ) { + local_x = internal::getCoordinates( x ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + local_x_nz = local_x.nonzeroes(); + if( local_x_nz < local_n ) { + sparse = true; + } + } + + already_dense_input = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( y ) ); + if( !already_dense_input ) { + local_y = internal::getCoordinates( y ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + local_y_nz = local_y.nonzeroes(); + if( local_y_nz < local_n ) { + sparse = true; + } + } + } + + if( sparse ) { + // performs the computation for the sparse case + ... + } else { + // performs the computation for the dense case + ... 
+ } + + if( !already_dense_output ) { + internal::getCoordinates( x ).asyncJoinSubset( local_x, active_chunk_id, max_num_chunks ); + } + + return rc; + }; + + ret = ret ? ret : internal::le.addStage( + std::move( func ), internal::Opcode::BLAS1_FOLD_VECTOR_VECTOR_GENERIC, + n, sizeof( IOType ), dense_descr, true, + &x, nullptr, + &internal::getCoordinates( x ), nullptr, + &y, nullptr, nullptr, nullptr, + &internal::getCoordinates( y ), nullptr, nullptr, nullptr + ); + + return ret; +} +``` + +The state of the local view is read for each vector accessed in an operation by invoking `asyncSubset`. The sparsity structure may be updated only for the output vector, and thus `asyncJoinSubset` is invoked only for the output vector to update the number of new nonzeroes. Operations consider the dense and the sparse case, and the executed path is determined at run-time based on the sparsity structure of the local coordinates. To avoid the overhead of initialising the local views, the `nonblocking` backend performs compile-time and runtime optimisations discussed in the next section. Therefore, `asyncSubset` and `asyncJoinSubset` are conditionally invoked depending on whether the corresponding vectors are already dense. + + +## Optimisations for dense vectors + +To improve the performance of nonblocking execution, it is crucial to avoid the usage of the local views when the vectors are dense. It is possible to determine whether a vector is dense based on compile-time information from descriptors and runtime analysis. The first one implies zero runtime overhead, but the descriptors must be provided by the user. + +There exist two main differences between the compile-time information from descriptors and the runtime analysis. +First, descriptors may apply to all vectors of an operation, whereas the runtime analysis applies to each individual vector of an operation. 
Second, descriptors refer to the vectors of a specific operation, whereas the runtime analysis refers to the state of a vector before the execution of a pipeline.
+
+### Compile-time descriptors
+
+The ALP/GraphBLAS implementation provides a set of descriptors defined in `include/graphblas/descriptors.hpp`, and they may be combined using bit-wise operators.
+A descriptor is passed to an operation and indicates some information about some or all of the output and input containers, e.g., vectors and matrices.
+Three of these descriptors are the following:
+
+* `dense` to indicate that all input and output vectors are structurally dense before the invocation;
+* `structural` that ignores the values of the mask and uses only its structure, i.e., the i-th element evaluates to true if any value is assigned to it; and
+* `invert_mask` that inverts the mask.
+
+The `dense` and `structural` descriptors may affect both correctness and performance, and `invert_mask` affects only the correctness of an operation. These three descriptors may be used to perform optimisations for the local coordinates mechanism. In particular, if the dense descriptor is provided, it implies that all the vectors accessed in an operation are dense before the invocation. Therefore, an operation can safely iterate over all the elements of the vectors without using either the global or the local coordinates.
+
+One exception is an out-of-place operation that receives a mask, since the dense descriptor itself does not guarantee that all the elements of a dense mask evaluate to true. Therefore, a dense output vector may become sparse once the computation is completed. That is, the output vector becomes empty in the beginning of the operation, and then each of its coordinates may be assigned depending on whether the corresponding element of the mask evaluates to true or not. Reading the elements of a mask does not require usage of the local coordinates when the dense descriptor is given. 
However, to avoid the usage of the local coordinates for the output vector of an out-of-place operation that receives a mask, both the `structural` and the `invert_mask` descriptors should be given in addition to the `dense` descriptor. + +### Runtime analysis + +The runtime analysis for dense vectors relies on a simple property of ALP/GraphBLAS. A vector that is already dense before the execution of a pipeline cannot become sparse during the execution of the pipeline unless the pipeline contains an out-of-place operation, i.e., `grb::set`, `grb::eWiseApply`, or `grb::clear` that makes the vector empty. The current design for nonblocking execution in ALP/GraphBLAS allows pipelines that include an out-of-place operation but does not allow pipelines that include the `grb::clear` operation. + +The nonblocking execution relies on the runtime analysis to determine whether a vector is already dense before the execution of a pipeline, only when the `dense` descriptor is not given by the user. For each already dense vector of a pipeline, neither the global nor the local coordinates are used unless the vector is the output of an out-of-place operation. Therefore, the overhead of the local coordinates mechanism is completely avoided. + +### Implementation of the optimisation + +To illustrate the implementation of the compile-time and runtime optimisations for dense vectors, we use one example of an in-place and one example of an out-of-place operation. +The runtime analysis relies on the `allAlreadyDenseVectors` function that returns `true` when all the vectors accessed in a pipeline are already dense, and on `containsAlreadyDenseContainer` that returns `true` when a specific vector accessed in a pipeline is already dense. + +#### In-place operations + +In the case of an in-place operation, we use the example of the `grb::foldl` operation discussed earlier. +The code below is included in the lambda function of `grb::foldl`. 
+ +```cpp +const bool already_dense_vectors = dense_descr || pipeline.allAlreadyDenseVectors(); + +bool already_dense_output = true; +bool already_dense_input = true; + +if( !already_dense_vectors ) { + already_dense_output = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( x ) ); + if( !already_dense_output ) { + local_x = internal::getCoordinates( x ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + local_x_nz = local_x.nonzeroes(); + if( local_x_nz < local_n ) { + sparse = true; + } + } + + already_dense_input = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( y ) ); + if( !already_dense_input ) { + local_y = internal::getCoordinates( y ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + local_y_nz = local_y.nonzeroes(); + if( local_y_nz < local_n ) { + sparse = true; + } + } +} + +... + +if( !already_dense_output ) { + internal::getCoordinates( x ).asyncJoinSubset( local_x, active_chunk_id, max_num_chunks ); +} +``` + +The variable `already_dense_vectors` indicates whether all the vectors accessed in this operation are already dense based on compile-time or runtime information. +In addition, one variable is declared for each vector to indicate whether a vector is already dense, i.e., the variables `already_dense_output` and `already_dense_input` are initialised to `true`, assuming that the vectors are already dense. +If `already_dense_vectors` is evaluated to true, the state of the local views is not read and the assumption for already dense vectors is correct. +Otherwise, it is necessary to check if each vector accessed in the operation is already dense, and if this is not the case, the state of the local view is read by invoking `asyncSubset`. +The update of the state for the local view is performed once the computation is completed via `asyncJoinSubset` only when the output vector is not already dense. 
+ +#### Out-of-place operations + +For the implementation of the optimisation for dense vectors of an out-of-place operation, we use the example of the `grb::eWiseApply` operation defined in `blas1.hpp`. +There exist four main scenarios we need to consider, depending on whether the output vector for a tile needs to become empty, dense, or both empty and dense, and whether the operation receives a mask. + +##### Out-of-place operation with a potentially sparse output vector + +In the case that the input consists of three vectors, the output vector will have an a-priori unknown sparsity structure. +Therefore, unless all vectors are already dense, it is necessary to initialise the state of the output vector via `asyncSubset` and clear the coordinates of each local view by invoking `local_clear`. +In contrast to an in-place operation, the decision about reading and updating the state of the output vector does not depend on whether the output vector is already dense, +since an already dense output vector may become sparse depending on the sparsity structure of the input vectors. + +Since the current design for nonblocking execution does not allow the number of nonzeroes to decrease, it is necessary to reset the global counter of nonzeroes by invoking `reset_global_nnz_counter`. +The `local_clear` function updates properly the number of new nonzeroes that should be written later to the global stack by `joinSubset`, i.e., all the nonzeroes of the local view are considered as new. +In addition, the output vector is marked as potentially sparse by invoking `markMaybeSparseContainer`. +Both of these functions are invoked only by the thread that executes the first tile, i.e., when `lower_bound = 0`. 
+ +```cpp +template< + Descriptor descr = descriptors::no_operation, class Monoid, + typename OutputType, typename InputType1, typename InputType2, + typename Coords +> +RC eWiseApply( + Vector< OutputType, nonblocking, Coords > &z, + const Vector< InputType1, nonblocking, Coords > &x, + const Vector< InputType2, nonblocking, Coords > &y, + const Monoid &monoid = Monoid(), + ... +) { + const size_t n = internal::getCoordinates( z ).size(); + + ... + + RC ret = SUCCESS; + + constexpr const bool dense_descr = descr & descriptors::dense; + + internal::Pipeline::stage_type func = [&z, &x, &y, &monoid, phase] ( + internal::Pipeline &pipeline, + const size_t active_chunk_id, const size_t max_num_chunks, + const size_t lower_bound, const size_t upper_bound + ) { + RC rc = SUCCESS; + + const Vector< bool, nonblocking, Coords > * const null_mask = nullptr; + const Coords * const local_null_mask = nullptr; + + Coords local_x, local_y, local_z; + + const bool already_dense_vectors = dense_descr || pipeline.allAlreadyDenseVectors(); + + bool already_dense_input_x = true; + bool already_dense_input_y = true; + + if( !already_dense_vectors ) { + local_z = internal::getCoordinates( z ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + + already_dense_input_x = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( x ) ); + if( !already_dense_input_x ) { + local_x = internal::getCoordinates( x ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + } + + already_dense_input_y = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( y ) ); + if( !already_dense_input_y ) { + local_y = internal::getCoordinates( y ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + } + } + + const internal::Wrapper< false, InputType1, Coords > x_wrapper( x ); + const internal::Wrapper< false, InputType2, Coords > y_wrapper( y ); + + const auto op = monoid.getOperator(); + + if( !already_dense_vectors ) { 
+ local_z.local_clear(); + if( lower_bound == 0 ) { + internal::getCoordinates( z ).reset_global_nnz_counter(); + pipeline.markMaybeSparseContainer( &internal::getCoordinates( z ) ); + } + } + + // performs the computation + ... + + if( !already_dense_vectors ) { + internal::getCoordinates( z ).asyncJoinSubset( local_z, active_chunk_id, max_num_chunks ); + } + + return rc; + }; + + ret = ret ? ret : internal::le.addStage( + std::move( func ), internal::Opcode::BLAS1_EWISEAPPLY, + n, sizeof( OutputType ), dense_descr, true, + &z, nullptr, + &internal::getCoordinates( z ), nullptr, + &x, &y, nullptr, nullptr, + &internal::getCoordinates( x ), &internal::getCoordinates( y ), nullptr, nullptr + ); + + return ret; +} +``` + +##### Out-of-place operation with a dense output vector + +In the case that the input consists of a scalar and a monoid, it is guaranteed that the output vector will be dense. +Therefore, the only criterion to avoid the usage of the local views is whether the output vector is already dense. +If the output vector is not already dense, then the state of the local view is read, all the not assigned coordinates are assigned by invoking `local_assignAllNotAlreadyAssigned`, and the state is updated via `asyncJoinSubset`. + +```cpp + +template< + Descriptor descr = descriptors::no_operation, class Monoid, + typename OutputType, typename InputType1, typename InputType2, + typename Coords +> +RC eWiseApply( + Vector< OutputType, nonblocking, Coords > &z, + const InputType1 alpha, + const Vector< InputType2, nonblocking, Coords > &y, + const Monoid &monoid = Monoid(), + ... +) { + const size_t n = internal::getCoordinates( z ).size(); + + ... 
+ + RC ret = SUCCESS; + + constexpr const bool dense_descr = descr & descriptors::dense; + + internal::Pipeline::stage_type func = [&z, alpha, &y, &monoid] ( + internal::Pipeline &pipeline, + const size_t active_chunk_id, const size_t max_num_chunks, + const size_t lower_bound, const size_t upper_bound + ) { + RC rc = SUCCESS; + + Coords local_x, local_y, local_z; + + const bool already_dense_vectors = dense_descr || pipeline.allAlreadyDenseVectors(); + + bool already_dense_output = true; + bool already_dense_input_y = true; + + already_dense_output = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( z ) ); + if( !already_dense_output ) { + local_z = internal::getCoordinates( z ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + } + + if( !already_dense_vectors ) { + already_dense_input_y = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( y ) ); + if( !already_dense_input_y ) { + local_y = internal::getCoordinates( y ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + } + } + + const internal::Wrapper< true, InputType1, Coords > x_wrapper( alpha ); + const internal::Wrapper< false, InputType2, Coords > y_wrapper( y ); + + const auto &op = monoid.getOperator(); + + if( !already_dense_output ) { + local_z.local_assignAllNotAlreadyAssigned(); + } + + // performs the computation + ... + + if( !already_dense_output ) { + internal::getCoordinates( z ).asyncJoinSubset( local_z, active_chunk_id, max_num_chunks ); + } + + return rc; + }; + + ret = ret ? 
ret : internal::le.addStage( + std::move( func ), internal::Opcode::BLAS1_EWISEAPPLY, + n, sizeof( OutputType ), dense_descr, true, + &z, nullptr, + &internal::getCoordinates( z ), nullptr, + &y, nullptr, nullptr, nullptr, + &internal::getCoordinates( y ), nullptr, nullptr, nullptr + ); + + return ret; +} +``` + +##### Out-of-place operation with an output vector that consists of some potentially sparse tiles and some dense tiles + +In the case that the input consists of an operator instead of a monoid, the output vector may become sparse after the computation unless all vectors are already dense. +Therefore, the global counter of nonzeroes is reset, and the decision about clearing the local coordinates or assigning all of them is made separately for each local view. +The vector is marked as potentially sparse when the local coordinates are cleared for at least one of the tiles. + +```cpp +template< + Descriptor descr = descriptors::no_operation, class OP, + typename OutputType, typename InputType1, typename InputType2, + typename Coords +> +RC eWiseApply( + Vector< OutputType, nonblocking, Coords > &z, + const InputType1 alpha, + const Vector< InputType2, nonblocking, Coords > &y, + const OP &op = OP(), + ... +) { + const size_t n = internal::getCoordinates( z ).size(); + + ... 
+ + RC ret = SUCCESS; + + constexpr const bool dense_descr = descr & descriptors::dense; + + internal::Pipeline::stage_type func = [&z, alpha, &y, &op] ( + internal::Pipeline &pipeline, + const size_t active_chunk_id, const size_t max_num_chunks, + const size_t lower_bound, const size_t upper_bound + ) { + RC rc = SUCCESS; + + const Vector< bool, nonblocking, Coords > * const null_mask = nullptr; + const Coords * const local_null_mask = nullptr; + + Coords local_mask, local_x, local_y, local_z; + const size_t local_n = upper_bound - lower_bound; + size_t local_y_nz = local_n; + + const bool already_dense_vectors = dense_descr || pipeline.allAlreadyDenseVectors(); + + bool already_dense_input_y = true; + + if( !already_dense_vectors ) { + local_z = internal::getCoordinates( z ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + + already_dense_input_y = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( y ) ); + if( !already_dense_input_y ) { + local_y = internal::getCoordinates( y ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + local_y_nz = local_y.nonzeroes(); + } + } + + const internal::Wrapper< true, InputType1, Coords > x_wrapper( alpha ); + const internal::Wrapper< false, InputType2, Coords > y_wrapper( y ); + + if( !already_dense_vectors ) { + if( lower_bound == 0 ) { + internal::getCoordinates( z ).reset_global_nnz_counter(); + } + } + + if( (descr & descriptors::dense) || local_y_nz == local_n ) { + if( !already_dense_vectors ) { + local_z.local_assignAll( ); + } + + // performs the computation for the dense case + ... + } else { + if( !already_dense_vectors ) { + local_z.local_clear(); + pipeline.markMaybeSparseContainer( &internal::getCoordinates( z ) ); + } + + // performs the computation for the sparse case + ... + } + + if( !already_dense_vectors ) { + internal::getCoordinates( z ).asyncJoinSubset( local_z, active_chunk_id, max_num_chunks ); + } + + return rc; + }; + + ret = ret ? 
ret : internal::le.addStage( + std::move( func ), internal::Opcode::BLAS1_EWISEAPPLY, + n, sizeof( OutputType ), dense_descr, true, + &z, nullptr, + &internal::getCoordinates( z ), nullptr, + &y, nullptr, nullptr, nullptr, + &internal::getCoordinates( y ), nullptr, nullptr, nullptr + ); + + return ret; +} +``` + +##### Out-of-place operation that receives a mask + +In the case that an out-of-place operation receives a mask, a second variable, `mask_is_dense`, is used to indicate whether the mask is dense based on compile-time information from descriptors or the runtime analysis for already dense vectors. +Then, all the decisions about the output vector are made based on this variable. +In addition, the function `markMaybeSparseDenseDescriptorVerification` is invoked to mark the output vector as potentially sparse when the `dense` descriptor is provided and the elements of the mask may be evaluated to `false` as explained in the section about the dense descriptor verification. + +```cpp +template< + Descriptor descr = descriptors::no_operation, class Monoid, + typename OutputType, typename MaskType, + typename InputType1, typename InputType2, + typename Coords +> +RC eWiseApply( + Vector< OutputType, nonblocking, Coords > &z, + const Vector< MaskType, nonblocking, Coords > &mask, + const InputType1 alpha, + const Vector< InputType2, nonblocking, Coords > &y, + const Monoid &monoid = Monoid(), + ... +) { + const size_t n = internal::getCoordinates( z ).size(); + + ... 
+ + RC ret = SUCCESS; + + constexpr const bool dense_descr = descr & descriptors::dense; + constexpr const bool dense_mask = dense_descr && (descr & descriptors::structural) && !(descr & descriptors::invert_mask); + + internal::Pipeline::stage_type func = [&z, &mask, alpha, &y, &monoid] ( + internal::Pipeline &pipeline, + const size_t active_chunk_id, const size_t max_num_chunks, + const size_t lower_bound, const size_t upper_bound + ) { + RC rc = SUCCESS; + + Coords local_mask, local_x, local_y, local_z; + const size_t local_n = upper_bound - lower_bound; + + const bool already_dense_vectors = dense_descr || pipeline.allAlreadyDenseVectors(); + + const bool mask_is_dense = (descr & descriptors::structural) && + !(descr & descriptors::invert_mask) && already_dense_vectors; + + bool already_dense_mask = true; + bool already_dense_input_y = true; + + if( !mask_is_dense ) { + local_z = internal::getCoordinates( z ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + if( dense_descr && local_z.nonzeroes() < local_n ) { + return ILLEGAL; + } + } + + if( !already_dense_vectors ) { + already_dense_mask = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( mask ) ); + if( !already_dense_mask ) { + local_mask = internal::getCoordinates( mask ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + } + + already_dense_input_y = pipeline.containsAlreadyDenseContainer( &internal::getCoordinates( y ) ); + if( !already_dense_input_y ) { + local_y = internal::getCoordinates( y ).asyncSubset( active_chunk_id, max_num_chunks, lower_bound, upper_bound ); + } + } + + const internal::Wrapper< true, InputType1, Coords > x_wrapper( alpha ); + const internal::Wrapper< false, InputType2, Coords > y_wrapper( y ); + + const InputType2 right_identity = monoid.template getIdentity< InputType2 >(); + const auto &op = monoid.getOperator(); + + if( !mask_is_dense ) { + local_z.local_clear(); + if( lower_bound == 0 ) { + 
internal::getCoordinates( z ).reset_global_nnz_counter(); + pipeline.markMaybeSparseContainer( &internal::getCoordinates( z ) ); + if( dense_descr ) { + pipeline.markMaybeSparseDenseDescriptorVerification( &internal::getCoordinates( z ) ); + } + } + } + + // performs the computation + ... + + if( !mask_is_dense ) { + internal::getCoordinates( z ).asyncJoinSubset( local_z, active_chunk_id, max_num_chunks ); + } + + return rc; + }; + + ret = ret ? ret : internal::le.addStage( + std::move( func ), internal::Opcode::BLAS1_MASKED_EWISEAPPLY, + n, sizeof( OutputType ), dense_descr, dense_mask, + &z, nullptr, + &internal::getCoordinates( z ), nullptr, + &y, &mask, nullptr, nullptr, + &internal::getCoordinates( y ), &internal::getCoordinates( mask ), nullptr, nullptr + ); + + return ret; +} +``` + + +## Pipeline execution + +The nonblocking execution in ALP/GraphBLAS expresses operations as a linear sequence of stages that form a pipeline. The execution of a pipeline is performed when the computation is necessary for the sound execution of the program. Opaqueness guarantees that lazy evaluation is safe when the output of an operation is a container, i.e., a vector or a matrix. The current version of ALP/GraphBLAS does not implement scalars as opaque data types according to the [version 1.3.0](https://graphblas.org/docs/GraphBLAS_API_C_v1.3.0.pdf) of the C API specification. Opaque scalars were introduced later in the [version 2.0.0](https://graphblas.org/docs/GraphBLAS_API_C_v2.0.0.pdf) and may further improve the performance of nonblocking execution. 
+ +A pipeline must be executed in the following cases: + +* the user explicitly extracts data from a container by using the ALP/GraphBLAS API, e.g., when reading the elements of a vector by using iterators; + +* the user invokes the constructor of a container; + +* memory is deallocated due to a destructor invocation; + +* the invoked operation returns a scalar, e.g., the `grb::dot` operation, in particular, the operation is first added into the pipeline, and then the pipeline is executed immediately before returning the scalar; + +* when a sparse matrix–vector multiplication (SpMV) operation is added into a pipeline with another operation that overwrites the input vector to the SpMV; + +* when the user explicitly forces the execution of a pipeline via a call to `grb::wait`. + +Although level-3 operations are not yet implemented for nonblocking execution, a sparse matrix–sparse matrix multiplication (SpMSpM) operation implies the same constraint as SpMV, i.e., the SpMSpM operation cannot be fused together with another operation that overwrites any of the SpMSpM input matrices. + +When a new stage is added to a pipeline, the pipeline execution is performed within the `addStage` function of `lazy_evaluation.cpp`, which implements the dynamic data dependence analysis and identifies any shared data between operations. The pipeline execution due to explicit invocation of iterators or constructors or memory deallocation is performed in `vector.hpp`. The execution of a pipeline caused by `grb::wait` is implemented in `io.hpp`. + +The code for the pipeline execution is found in the `execution` method of `pipeline.cpp`. The execution is performed in four main steps, three of which may be omitted when the pipeline does not include any out-of-place operation and all accessed vectors are dense. Simplified code for the execution of the four main steps is shown below. 
+ +```cpp +bool initialized_coordinates = false; + +#pragma omp parallel for private(vt, pt) schedule(dynamic) num_threads(nthreads) +for( size_t tile_id = 0; tile_id < tiles; ++tile_id ) { + ... + for( vt = vbegin(); vt != vend(); ++vt ) { + ... + (**vt).asyncSubsetInit( tile_id, tiles, lower_bound, upper_bound ); + initialized_coordinates = true; + } +} + +#pragma omp parallel for private(vt, pt) schedule(dynamic) num_threads(nthreads) +for( size_t tile_id = 0; tile_id < tiles; ++tile_id ) { + ... + RC local_ret = SUCCESS; + for( pt = pbegin(); pt != pend(); ++pt ) { + local_ret = local_ret ? local_ret : (*pt)( *this, tile_id, tiles, lower_bound, upper_bound ); + } + if( local_ret != SUCCESS ) { + ret = local_ret; + } +} + +if( initialized_coordinates ) { + bool new_nnz = false; + + for( vt = vbegin(); vt != vend(); ++vt ) { + ... + if( (**vt).newNonZeroes( tiles ) ) { + new_nnz = true; + (**vt).prefixSumComputation( tiles ); + } + } + + if( new_nnz ) { + #pragma omp parallel for private(vt) schedule(dynamic) num_threads(nthreads) + for( size_t tile_id = 0; tile_id < tiles; ++tile_id ) { + ... + for( vt = vbegin(); vt != vend(); ++vt ) { + ... + if( (**vt).newNonZeroes( tiles ) ) { + (**vt).joinSubset( tile_id, tiles, lower_bound, upper_bound ); + } + } + } + } +} +``` +The local views of each vector accessed in the pipeline are initialised via `asyncSubsetInit`, and then the pipeline is executed. Once the execution is completed, the local views may contain a number of new nonzeroes that must be pushed to the global stack by `joinSubset`. Before this step, it is necessary to perform the prefix-sum computation for the number of new nonzeroes of each local view by invoking `prefixSumComputation`. All these steps may be executed in parallel for different tiles of the vectors as shown with the OpenMP directives, except for the prefix-sum computation that is parallelised internally. 
The scheduling policy used for OpenMP is dynamic to handle load imbalance, and the performance parameters, i.e., the number of threads and the tile size used in the lambda functions, are automatically selected by the analytic model (see `analytic_model.cpp`). + + +## Analytic performance model + +The analytic performance model used for nonblocking execution consists of the `getPerformanceParameters` function defined in `analytic_model.cpp`, and this function is invoked before the pipeline execution within the `execution` method in `pipeline.cpp`. The analytic model makes an estimation about the number of threads and the tile size that lead to good performance for the execution of a given pipeline, and the estimation is based on various parameters such as the number of vectors accessed in the pipeline, the data type of the vectors, and the size of the vectors. Two additional parameters of special importance are the size of the L1 cache and the number of cores available in the system, since the selected tile size must allow the data to fit in the L1 cache, and there should be sufficient work to utilise as many cores as possible. + +The analytic model relies on two environment variables: + +* `OMP_NUM_THREADS` +* `GRB_NONBLOCKING_TILE_SIZE` + +for the number of threads used by OpenMP and the tile size used by the nonblocking backend, respectively. The number of threads determined by the environment variable is an upper bound for the number of threads that may be selected by the analytic model. If the environment variable for the tile size is set, a fixed tile size is used for all executed pipelines. Otherwise, the analytic model automatically selects a proper tile size, depending on the parameters of the executed pipeline. + +The initialisation for the number of threads used by OpenMP and the manual tile size is performed in `init.cpp`, and the data of the analytic model are handled by the `ANALYTIC_MODEL` and `IMPLEMENTATION` classes of `config.hpp`. 
+ + +## Dense descriptor verification + +The correct usage of the `dense` descriptor, for the blocking execution, is checked in the beginning of each ALP/GraphBLAS operation. +If there exists at least one input or output vector that is not dense, then the `grb::ILLEGAL` error code is returned as shown in the example below. + +```cpp +const size_t n = size( x ); +if( (descr & descriptors::dense) && nnz( x ) < n ) { + return ILLEGAL; +} +``` + +For the nonblocking execution, checking the correct usage of the `dense` descriptor requires a different process, since the number of nonzeroes in the vectors may not be up to date due to lazy evaluation. +In particular, the check is moved within the lambda function defined for each operation, and the check for the sparsity structure is based on the local views. +However, the optimisation employed by the nonblocking execution for already dense vectors implies that the local views are not always available. +Therefore, it is not always possible to perform the check for correct usage of the `dense` descriptor within the lambda function of an operation. + +The verification process for correct usage of the `dense` descriptor relies on the following property: + +*A vector that should be dense when an operation is invoked, should remain dense after the execution of the pipeline, unless this vector is the output of an out-of-place operation that receives a mask with elements that may be evaluated to `false`*. + +Therefore, the `nonblocking` backend delays the check and performs the verification for correct usage of the `dense` descriptor after the pipeline execution. +To keep track of the vectors that should be dense after the execution of the pipeline, the addition of a lambda function as a stage of a pipeline is accompanied by a boolean variable, called `dense_descr`, that indicates if the `dense` descriptor is given for this operation. 
+In the case of an out-of-place operation that receives a mask, e.g., `grb::eWiseApply` discussed earlier, the output vector may be marked as potentially sparse when the `dense` descriptor is provided, by invoking `markMaybeSparseDenseDescriptorVerification` as shown in the example of `grb::eWiseApply` above. +In this case, the dense descriptor verification is disabled for the output vector of this specific operation. + +This solution is efficient and catches most cases of an illegal `dense` descriptor. +However, it cannot catch an illegal usage of the `dense` descriptor for an operation that receives a sparse vector, which becomes dense during the execution of the pipeline, since it is impossible to detect that the vector was not dense earlier. + diff --git a/docs/Suppressions.md b/docs/Suppressions.md index 1915147b5..630b044ab 100644 --- a/docs/Suppressions.md +++ b/docs/Suppressions.md @@ -48,41 +48,17 @@ if( masked ) { ``` 4. `include/graphblas/base/internalops.hpp`, multiple sources: -- mul::apply, add::apply, add::foldl, equal::apply, not_equal::apply. +- mul::apply, add::apply, add::foldl, equal::apply, not_equal::apply, and + logical_and::foldl. These are indirectly caused by the following calls: - `include/graphblas/blas0.hpp`, apply; - `include/graphblas/reference/blas1.hpp`, dot_generic, masked_apply_generic, - and sparse_apply_generic. + sparse_apply_generic, and fold_from_vector_to_scalar_generic. These are all OK to suppress since the reads are masked. -5. 
`include/graphblas/reference/blas1.hpp`, fold_from_vector_to_scalar_generic: -``` -GRB_UTIL_IGNORE_MAYBE_UNINITIALIZED // the below code ensures to set local -IOType local; // whenever our local block is -GRB_UTIL_RESTORE_WARNINGS // non-empty -if( end > 0 ) { - if( i < end ) { - local = static_cast< IOType >( internal::getRaw( to_fold )[ i ] ); - } else { - local = static_cast< IOType >( internal::getRaw( to_fold )[ 0 ] ); - } -} -``` -and -``` -if( root == s ) { - // then I should be non-empty - assert( !empty ); - // set global value to locally computed value - GRB_UTIL_IGNORE_MAYBE_UNINITIALIZED // one is only root if the local - global = local; // chunk is non-empty, in which case - GRB_UTIL_RESTORE_WARNINGS // local will be initialised (above) - } -``` - -6. `include/graphblas/reference/blas1.hpp`, masked_apply_generic: +5. `include/graphblas/reference/blas1.hpp`, masked_apply_generic: ``` if( mask_b[ t ] ) { // ... @@ -91,3 +67,18 @@ if( mask_b[ t ] ) { GRB_UTIL_RESTORE_WARNINGS // if mask_b is true ``` +6. `include/graphblas/nonblocking/blas1.hpp`, masked_apply_generic: +``` +for( size_t k = 0; k < block_size; ++k ) { + const size_t index = i + k; + assert( index < local_n + lower_bound ); + if( mask_b[ k ] ) { + (void) local_z.assign( index - lower_bound ); + GRB_UTIL_IGNORE_MAYBE_UNINITIALIZED // This is only triggered with + *( z_p + index ) = z_b[ k ]; // mask_b[ k ], which in the above + GRB_UTIL_RESTORE_WARNINGS // loop also triggeres initialising + // z_b[ k ] + } +} +``` + diff --git a/docs/doxy.conf b/docs/doxy.conf index d91dae080..d1e63f220 100644 --- a/docs/doxy.conf +++ b/docs/doxy.conf @@ -1,20 +1,4 @@ -# Doxyfile 1.8.14 - -# -# Copyright 2021 Huawei Technologies Co., Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# +# Doxyfile 1.9.3 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -33,10 +17,10 @@ # Project related configuration options #--------------------------------------------------------------------------- -# This tag specifies the encoding used for all characters in the config file -# that follow. The default is UTF-8 which is also the encoding used for all text -# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv -# built into libc) for the transcoding. See +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See # https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. @@ -48,19 +32,19 @@ DOXYFILE_ENCODING = UTF-8 # title of most generated pages and in a few other places. # The default value is: My Project. -PROJECT_NAME = "ALP/GraphBLAS" +PROJECT_NAME = "ALP Developer Documentation" # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = 0.6.0 +PROJECT_NUMBER = 0.7.0 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. -PROJECT_BRIEF = +PROJECT_BRIEF = "Algebraic Programming Developer Documentation" # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 @@ -74,7 +58,7 @@ PROJECT_LOGO = # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. -OUTPUT_DIRECTORY = docs/code +OUTPUT_DIRECTORY = docs/developer # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and @@ -149,7 +133,7 @@ ALWAYS_DETAILED_SEC = NO # operators of the base classes will not be shown. # The default value is: NO. -INLINE_INHERITED_MEMB = NO +INLINE_INHERITED_MEMB = YES # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the @@ -195,6 +179,16 @@ SHORT_NAMES = NO JAVADOC_AUTOBRIEF = YES +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. 
If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus @@ -215,6 +209,14 @@ QT_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. @@ -238,21 +240,19 @@ TAB_SIZE = 4 # the documentation. An alias has the form: # name=value # For example adding -# "sideeffect=@par Side Effects:\n" +# "sideeffect=@par Side Effects:^^" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading -# "Side Effects:". You can put \n's in the value part of an alias to insert -# newlines (in the resulting output). You can put ^^ in the value part of an -# alias to insert a newline as if a physical newline was in the original file. +# "Side Effects:". Note that you cannot put \n's in the value part of an alias +# to insert newlines (in the resulting output). You can put ^^ in the value part +# of an alias to insert a newline as if a physical newline was in the original +# file. When you need a literal { or } or , in the value part of an alias you +# have to escape them by means of a backslash (\), this can lead to conflicts +# with the commands \{ and \} for these it is advised to use the version @{ and +# @} or use a double escape (\\{ and \\}) ALIASES = -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". 
For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all @@ -281,28 +281,40 @@ OPTIMIZE_FOR_FORTRAN = NO OPTIMIZE_OUTPUT_VHDL = NO +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and -# language is one of the parsers supported by doxygen: IDL, Java, Javascript, -# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: -# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: -# Fortran. In the later case the parser tries to guess whether the code is fixed -# or free formatted code, this is the default for Fortran type files), VHDL. For -# instance to make doxygen treat .inc files as Fortran files (default is PHP), -# and .f files as C (default is Fortran), use: inc=Fortran f=C. +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, +# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. 
In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise -# the files are not read by doxygen. +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable -# documentation. See http://daringfireball.net/projects/markdown/ for details. +# documentation. See https://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. @@ -314,7 +326,7 @@ MARKDOWN_SUPPORT = YES # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. -# Minimum value: 0, maximum value: 99, default value: 0. +# Minimum value: 0, maximum value: 99, default value: 5. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. TOC_INCLUDE_HEADINGS = 0 @@ -430,6 +442,19 @@ TYPEDEF_HIDES_STRUCT = NO LOOKUP_CACHE_SIZE = 0 +# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. 
You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- @@ -448,13 +473,19 @@ EXTRACT_ALL = NO # be included in the documentation. # The default value is: NO. -EXTRACT_PRIVATE = NO +EXTRACT_PRIVATE = YES + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. -EXTRACT_PACKAGE = NO +EXTRACT_PACKAGE = YES # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. @@ -476,7 +507,7 @@ EXTRACT_LOCAL_CLASSES = YES # included. # The default value is: NO. -EXTRACT_LOCAL_METHODS = NO +EXTRACT_LOCAL_METHODS = YES # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called @@ -487,6 +518,13 @@ EXTRACT_LOCAL_METHODS = NO EXTRACT_ANON_NSPACES = NO +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. 
+ +RESOLVE_UNNAMED_PARAMS = YES + # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation @@ -504,8 +542,8 @@ HIDE_UNDOC_MEMBERS = NO HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO, these declarations will be -# included in the documentation. +# declarations. If set to NO, these declarations will be included in the +# documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO @@ -522,13 +560,20 @@ HIDE_IN_BODY_DOCS = NO # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. -INTERNAL_DOCS = NO - -# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES, upper-case letters are also -# allowed. This is useful if you have classes or files whose names only differ -# in case and if your file system supports case sensitive file names. Windows -# and Mac users are advised to set this option to NO. +INTERNAL_DOCS = YES + +# With the correct setting of option CASE_SENSE_NAMES doxygen will better be +# able to match the capabilities of the underlying filesystem. In case the +# filesystem is case sensitive (i.e. it supports files in the same directory +# whose names only differ in casing), the option must be set to YES to properly +# deal with such files in case they appear in the input. For filesystems that +# are not case sensitive the option should be be set to NO to properly deal with +# output files written for symbols that only differ in casing, such as for two +# classes, one named CLASS and the other named Class, and to also support +# references to files without having to specify the exact matching casing. 
On +# Windows (including Cygwin) and MacOS, users should typically set this option +# to NO, whereas on Linux or other Unix flavors it should typically be set to +# YES. # The default value is: system dependent. CASE_SENSE_NAMES = YES @@ -547,6 +592,12 @@ HIDE_SCOPE_NAMES = YES HIDE_COMPOUND_REFERENCE= NO +# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class +# will show which file needs to be included to use the class. +# The default value is: YES. + +SHOW_HEADERFILE = YES + # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. @@ -704,7 +755,8 @@ FILE_VERSION_FILTER = # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml -# will be used as the name of the layout file. +# will be used as the name of the layout file. See also section "Changing the +# layout of pages" for information. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE @@ -750,23 +802,35 @@ WARNINGS = YES WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for -# potential errors in the documentation, such as not documenting some parameters -# in a documented function, or documenting parameters that don't exist or using -# markup commands wrongly. +# potential errors in the documentation, such as documenting some parameters in +# a documented function twice, or documenting parameters that don't exist or +# using markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES +# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete +# function parameter documentation. 
If set to NO, doxygen will accept that some +# parameters have no documentation without warning. +# The default value is: YES. + +WARN_IF_INCOMPLETE_DOC = YES + # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return -# value. If set to NO, doxygen will only warn about wrong or incomplete -# parameter documentation, but not about the absence of documentation. +# value. If set to NO, doxygen will only warn about wrong parameter +# documentation, but not about the absence of documentation. If EXTRACT_ALL is +# set to YES then this flag will automatically be disabled. See also +# WARN_IF_INCOMPLETE_DOC # The default value is: NO. WARN_NO_PARAMDOC = NO # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. # The default value is: NO. WARN_AS_ERROR = NO @@ -783,7 +847,10 @@ WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard -# error (stderr). +# error (stderr). In case the file specified cannot be opened for writing the +# warning and error messages are written to standard error. When as file - is +# specified the warning and error messages are written to standard output +# (stdout). WARN_LOGFILE = @@ -802,8 +869,8 @@ INPUT = include/ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. 
See the libiconv -# documentation (see: https://www.gnu.org/software/libiconv/) for the list of -# possible encodings. +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 @@ -816,11 +883,15 @@ INPUT_ENCODING = UTF-8 # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, -# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, -# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, -# *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf and *.qsf. +# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, +# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C +# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.vhdl, *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.hpp \ *.cpp \ @@ -862,7 +933,7 @@ EXCLUDE_PATTERNS = # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, -# AClass::ANamespace, ANamespace::*Test +# ANamespace::AClass, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* @@ -980,7 +1051,7 @@ INLINE_SOURCES = NO STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented -# function all documented functions referencing it will be listed. +# entity all documented functions referencing it will be listed. # The default value is: NO. 
REFERENCED_BY_RELATION = NO @@ -1017,7 +1088,7 @@ SOURCE_TOOLTIPS = YES # # To use it do the following: # - Install the latest version of global -# - Enable SOURCE_BROWSER and USE_HTAGS in the config file +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # @@ -1050,13 +1121,6 @@ VERBATIM_HEADERS = YES ALPHABETICAL_INDEX = YES -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored @@ -1156,7 +1220,7 @@ HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to -# this color. Hue is specified as an angle on a colorwheel, see +# this color. Hue is specified as an angle on a color-wheel, see # https://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. @@ -1166,7 +1230,7 @@ HTML_EXTRA_FILES = HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors -# in the HTML output. For a value of 0 the output will use grayscales only. A +# in the HTML output. For a value of 0 the output will use gray-scales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. 
@@ -1195,9 +1259,9 @@ HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML # documentation will contain a main index with vertical navigation menus that -# are dynamically created via Javascript. If disabled, the navigation index will +# are dynamically created via JavaScript. If disabled, the navigation index will # consists of multiple levels of tabs that are statically embedded in every HTML -# page. Disable this option to support browsers that do not have Javascript, +# page. Disable this option to support browsers that do not have JavaScript, # like the Qt help browser. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1227,13 +1291,14 @@ HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/tools/xcode/), introduced with -# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at -# startup. See https://developer.apple.com/tools/creatingdocsetswithdoxygen.html -# for more information. +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. 
@@ -1247,6 +1312,13 @@ GENERATE_DOCSET = NO DOCSET_FEEDNAME = "Doxygen generated docs" +# This tag determines the URL of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDURL = + # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. @@ -1272,8 +1344,12 @@ DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. +# on Windows. In the beginning of 2021 Microsoft took the original page, with +# a.o. the download links, offline the HTML help workshop was already many years +# in maintenance mode). You can download the HTML help workshop from the web +# archives at Installation executable (see: +# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo +# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe). # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML @@ -1303,7 +1379,7 @@ CHM_FILE = HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). +# (YES) or that it should be included in the main .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
@@ -1348,7 +1424,8 @@ QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace -# (see: http://doc.qt.io/qt-4.8/qthelpproject.html#namespace). +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1356,7 +1433,8 @@ QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: http://doc.qt.io/qt-4.8/qthelpproject.html#virtual-folders). +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1364,28 +1442,30 @@ QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: http://doc.qt.io/qt-4.8/qthelpproject.html#custom-filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: -# http://doc.qt.io/qt-4.8/qthelpproject.html#filter-attributes). 
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = @@ -1428,16 +1508,28 @@ DISABLE_INDEX = NO # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can -# further fine-tune the look of the index. As an example, the default style -# sheet generated by doxygen has an example that shows how to put an image at -# the root of the tree instead of the PROJECT_NAME. Since the tree basically has -# the same information as the tab index, you could consider setting -# DISABLE_INDEX to YES when enabling this option. +# further fine tune the look of the index (see "Fine-tuning the output"). As an +# example, the default style sheet generated by doxygen has an example that +# shows how to put an image at the root of the tree instead of the PROJECT_NAME. +# Since the tree basically has the same information as the tab index, you could +# consider setting DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. 
GENERATE_TREEVIEW = NO +# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the +# FULL_SIDEBAR option determines if the side bar is limited to only the treeview +# area (value NO) or if it should extend to the full height of the window (value +# YES). Setting this to YES gives a layout similar to +# https://docs.readthedocs.io with more room for contents, but less room for the +# project logo, title, and description. If either GENERATE_TREEVIEW or +# DISABLE_INDEX is set to NO, this option has no effect. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FULL_SIDEBAR = NO + # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # @@ -1462,6 +1554,24 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO +# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email +# addresses. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +OBFUSCATE_EMAILS = YES + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. 
When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML @@ -1482,8 +1592,14 @@ FORMULA_FONTSIZE = 10 FORMULA_TRANSPARENT = YES +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. + +FORMULA_MACROFILE = + # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see -# https://www.mathjax.org) which uses client side Javascript for the rendering +# https://www.mathjax.org) which uses client side JavaScript for the rendering # instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path @@ -1493,11 +1609,29 @@ FORMULA_TRANSPARENT = YES USE_MATHJAX = NO +# With MATHJAX_VERSION it is possible to specify the MathJax version to be used. +# Note that the different versions of MathJax have different requirements with +# regards to the different settings, so it is possible that also other MathJax +# settings have to be changed when switching between the different MathJax +# versions. +# Possible values are: MathJax_2 and MathJax_3. +# The default value is: MathJax_2. +# This tag requires that the tag USE_MATHJAX is set to YES. + +MATHJAX_VERSION = MathJax_2 + # When MathJax is enabled you can set the default output format to be used for -# the MathJax output. See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. +# the MathJax output. For more details about the output format see MathJax +# version 2 (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3 +# (see: +# http://docs.mathjax.org/en/latest/web/components/output.html). 
# Possible values are: HTML-CSS (which is slower, but has the best -# compatibility), NativeMML (i.e. MathML) and SVG. +# compatibility. This is the name for Mathjax version 2, for MathJax version 3 +# this will be translated into chtml), NativeMML (i.e. MathML. Only supported +# for NathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This +# is the name for Mathjax version 3, for MathJax version 2 this will be +# translated into HTML-CSS) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. @@ -1510,22 +1644,29 @@ MATHJAX_FORMAT = HTML-CSS # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of -# MathJax from https://www.mathjax.org before deployment. -# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/. +# MathJax from https://www.mathjax.org before deployment. The default value is: +# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2 +# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3 # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example +# for MathJax version 2 (see +# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions): # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols +# For example for MathJax version 3 (see +# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html): +# MATHJAX_EXTENSIONS = ams # This tag requires that the tag USE_MATHJAX is set to YES. 
MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. @@ -1553,7 +1694,7 @@ MATHJAX_CODEFILE = SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be -# implemented using a web server instead of a web client using Javascript. There +# implemented using a web server instead of a web client using JavaScript. There # are two flavors of web server based searching depending on the EXTERNAL_SEARCH # setting. When disabled, doxygen will generate a PHP script for searching and # an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing @@ -1572,7 +1713,8 @@ SERVER_BASED_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: https://xapian.org/). +# Xapian (see: +# https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. @@ -1585,8 +1727,9 @@ EXTERNAL_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: https://xapian.org/). See the section "External Indexing and -# Searching" for details. +# Xapian (see: +# https://xapian.org/). See the section "External Indexing and Searching" for +# details. # This tag requires that the tag SEARCHENGINE is set to YES. SEARCHENGINE_URL = @@ -1637,21 +1780,35 @@ LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. 
# -# Note that when enabling USE_PDFLATEX this option is only used for generating -# bitmaps for formulas in the HTML output, but not in the Makefile that is -# written to the output directory. -# The default file is: latex. +# Note that when not enabling USE_PDFLATEX the default is latex when enabling +# USE_PDFLATEX the default is pdflatex and when in the later case latex is +# chosen this is overwritten by pdflatex. For specific output languages the +# default can have been set differently, this depends on the implementation of +# the output language. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate # index for LaTeX. +# Note: This tag is used in the Makefile / make.bat. +# See also: LATEX_MAKEINDEX_CMD for the part in the generated output file +# (.tex). # The default file is: makeindex. # This tag requires that the tag GENERATE_LATEX is set to YES. MAKEINDEX_CMD_NAME = makeindex +# The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to +# generate index for LaTeX. In case there is no backslash (\) as first character +# it will be automatically added in the LaTeX code. +# Note: This tag is used in the generated output file (.tex). +# See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat. +# The default value is: makeindex. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_MAKEINDEX_CMD = makeindex + # If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. @@ -1681,29 +1838,31 @@ PAPER_TYPE = a4 EXTRA_PACKAGES = amsmath -# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the -# generated LaTeX document. The header should contain everything until the first -# chapter. If it is left blank doxygen will generate a standard header. 
See -# section "Doxygen usage" for information on how to let doxygen write the -# default header to a separate file. +# The LATEX_HEADER tag can be used to specify a user-defined LaTeX header for +# the generated LaTeX document. The header should contain everything until the +# first chapter. If it is left blank doxygen will generate a standard header. It +# is highly recommended to start with a default header using +# doxygen -w latex new_header.tex new_footer.tex new_stylesheet.sty +# and then modify the file new_header.tex. See also section "Doxygen usage" for +# information on how to generate the default header that doxygen normally uses. # -# Note: Only use a user-defined header if you know what you are doing! The -# following commands have a special meaning inside the header: $title, -# $datetime, $date, $doxygenversion, $projectname, $projectnumber, -# $projectbrief, $projectlogo. Doxygen will replace $title with the empty -# string, for the replacement values of the other commands the user is referred -# to HTML_HEADER. +# Note: Only use a user-defined header if you know what you are doing! +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. The following +# commands have a special meaning inside the header (and footer): For a +# description of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HEADER = -# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the -# generated LaTeX document. The footer should contain everything after the last -# chapter. If it is left blank doxygen will generate a standard footer. See +# The LATEX_FOOTER tag can be used to specify a user-defined LaTeX footer for +# the generated LaTeX document. The footer should contain everything after the +# last chapter. If it is left blank doxygen will generate a standard footer. 
See # LATEX_HEADER for more information on how to generate a default footer and what -# special commands can be used inside the footer. -# -# Note: Only use a user-defined footer if you know what you are doing! +# special commands can be used inside the footer. See also section "Doxygen +# usage" for information on how to generate the default footer that doxygen +# normally uses. Note: Only use a user-defined footer if you know what you are +# doing! # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_FOOTER = @@ -1736,9 +1895,11 @@ LATEX_EXTRA_FILES = PDF_HYPERLINKS = YES -# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate -# the PDF file directly from the LaTeX files. Set this option to YES, to get a -# higher quality PDF documentation. +# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as +# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX +# files. Set this option to YES, to get a higher quality PDF documentation. +# +# See also section LATEX_CMD_NAME for selecting the engine. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1746,8 +1907,7 @@ USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode # command to the generated LaTeX files. This will instruct LaTeX to keep running -# if errors occur, instead of asking the user for help. This option is also used -# when generating formulas in HTML. +# if errors occur, instead of asking the user for help. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1760,16 +1920,6 @@ LATEX_BATCHMODE = NO LATEX_HIDE_INDICES = NO -# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source -# code with syntax highlighting in the LaTeX output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. 
-# This tag requires that the tag GENERATE_LATEX is set to YES. - -LATEX_SOURCE_CODE = NO - # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See # https://en.wikipedia.org/wiki/BibTeX and \cite for more info. @@ -1786,6 +1936,14 @@ LATEX_BIB_STYLE = plain LATEX_TIMESTAMP = NO +# The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute) +# path from which the emoji images will be read. If a relative path is entered, +# it will be relative to the LATEX_OUTPUT directory. If left blank the +# LATEX_OUTPUT directory will be used. +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_EMOJI_DIRECTORY = + #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- @@ -1825,9 +1983,9 @@ COMPACT_RTF = NO RTF_HYPERLINKS = NO -# Load stylesheet definitions from file. Syntax is similar to doxygen's config -# file, i.e. a series of assignments. You only have to provide replacements, -# missing definitions are set to their default value. +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# configuration file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. # # See also section "Doxygen usage" for information on how to generate the # default style sheet that doxygen normally uses. @@ -1836,22 +1994,12 @@ RTF_HYPERLINKS = NO RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an RTF document. Syntax is -# similar to doxygen's config file. A template extensions file can be generated -# using doxygen -e rtf extensionFile. +# similar to doxygen's configuration file. A template extensions file can be +# generated using doxygen -e rtf extensionFile. # This tag requires that the tag GENERATE_RTF is set to YES. 
RTF_EXTENSIONS_FILE = -# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code -# with syntax highlighting in the RTF output. -# -# Note that which sources are shown also depends on other settings such as -# SOURCE_BROWSER. -# The default value is: NO. -# This tag requires that the tag GENERATE_RTF is set to YES. - -RTF_SOURCE_CODE = NO - #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- @@ -1923,6 +2071,13 @@ XML_OUTPUT = xml XML_PROGRAMLISTING = YES +# If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include +# namespace members in file scope as well, matching the HTML output. +# The default value is: NO. +# This tag requires that the tag GENERATE_XML is set to YES. + +XML_NS_MEMB_FILE_SCOPE = NO + #--------------------------------------------------------------------------- # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- @@ -1941,15 +2096,6 @@ GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook -# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the -# program listings (including syntax highlighting and cross-referencing -# information) to the DOCBOOK output. Note that enabling this will significantly -# increase the size of the DOCBOOK output. -# The default value is: NO. -# This tag requires that the tag GENERATE_DOCBOOK is set to YES. - -DOCBOOK_PROGRAMLISTING = NO - #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- @@ -2124,34 +2270,10 @@ EXTERNAL_GROUPS = YES EXTERNAL_PAGES = YES -# The PERL_PATH should be the absolute path and name of the perl script -# interpreter (i.e. the result of 'which perl'). 
-# The default file (with absolute path) is: /usr/bin/perl. - -PERL_PATH = /usr/bin/perl - #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- -# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram -# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to -# NO turns the diagrams off. Note that this option also works with HAVE_DOT -# disabled, but it is recommended to install and use dot, since it yields more -# powerful graphs. -# The default value is: YES. - -CLASS_DIAGRAMS = YES - -# You can define message sequence charts within doxygen comments using the \msc -# command. Doxygen will then run the mscgen tool (see: -# http://www.mcternan.me.uk/mscgen/)) to produce the chart and insert it in the -# documentation. The MSCGEN_PATH tag allows you to specify the directory where -# the mscgen tool resides. If left empty the tool is assumed to be found in the -# default search path. - -MSCGEN_PATH = - # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. @@ -2208,11 +2330,14 @@ DOT_FONTSIZE = 10 DOT_FONTPATH = -# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for -# each documented class showing the direct and indirect inheritance relations. -# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. +# If the CLASS_GRAPH tag is set to YES (or GRAPH) then doxygen will generate a +# graph for each documented class showing the direct and indirect inheritance +# relations. In case HAVE_DOT is set as well dot will be used to draw the graph, +# otherwise the built-in generator will be used. 
If the CLASS_GRAPH tag is set +# to TEXT the direct and indirect inheritance relations will be shown as texts / +# links. +# Possible values are: NO, YES, TEXT and GRAPH. # The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. CLASS_GRAPH = YES @@ -2249,10 +2374,32 @@ UML_LOOK = NO # but if the number exceeds 15, the total amount of fields shown is limited to # 10. # Minimum value: 0, maximum value: 100, default value: 10. -# This tag requires that the tag HAVE_DOT is set to YES. +# This tag requires that the tag UML_LOOK is set to YES. UML_LIMIT_NUM_FIELDS = 10 +# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and +# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS +# tag is set to YES, doxygen will add type and arguments for attributes and +# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen +# will not generate fields with class member information in the UML graphs. The +# class diagrams will look similar to the default class diagrams but using UML +# notation for the relationships. +# Possible values are: NO, YES and NONE. +# The default value is: NO. +# This tag requires that the tag UML_LOOK is set to YES. + +DOT_UML_DETAILS = NO + +# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters +# to display on a single line. If the actual line length exceeds this threshold +# significantly it will wrapped across multiple lines. Some heuristics are apply +# to avoid ugly line breaks. +# Minimum value: 0, maximum value: 1000, default value: 17. +# This tag requires that the tag HAVE_DOT is set to YES. + +DOT_WRAP_THRESHOLD = 17 + # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and # collaboration graphs will show the relations between templates and their # instances. 
@@ -2319,6 +2466,13 @@ GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES +# The DIR_GRAPH_MAX_DEPTH tag can be used to limit the maximum number of levels +# of child directories generated in directory dependency graphs by dot. +# Minimum value: 1, maximum value: 25, default value: 1. +# This tag requires that the tag DIRECTORY_GRAPH is set to YES. + +DIR_GRAPH_MAX_DEPTH = 1 + # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. For an explanation of the image formats see the section # output formats in the documentation of the dot tool (Graphviz (see: @@ -2372,10 +2526,10 @@ MSCFILE_DIRS = DIAFILE_DIRS = # When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the -# path where java can find the plantuml.jar file. If left blank, it is assumed -# PlantUML is not used or called during a preprocessing step. Doxygen will -# generate a warning when it encounters a \startuml command in this case and -# will not generate output for the diagram. +# path where java can find the plantuml.jar file or to the filename of jar file +# to be used. If left blank, it is assumed PlantUML is not used or called during +# a preprocessing step. Doxygen will generate a warning when it encounters a +# \startuml command in this case and will not generate output for the diagram. PLANTUML_JAR_PATH = @@ -2437,14 +2591,18 @@ DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page # explaining the meaning of the various boxes and arrows in the dot generated # graphs. +# Note: This tag requires that UML_LOOK isn't set, i.e. the doxygen internal +# graphical representation for inheritance and collaboration diagrams is used. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. 
GENERATE_LEGEND = YES -# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot +# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate # files that are used to generate the various graphs. +# +# Note: This setting is not only used for dot files but also for msc temporary +# files. # The default value is: YES. -# This tag requires that the tag HAVE_DOT is set to YES. DOT_CLEANUP = YES diff --git a/docs/user.conf b/docs/user.conf new file mode 100644 index 000000000..c39f53a38 --- /dev/null +++ b/docs/user.conf @@ -0,0 +1,2634 @@ +# Doxyfile 1.9.3 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the configuration +# file that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# https://www.gnu.org/software/libiconv/ for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. 
This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "ALP User Documentation" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = 0.7.0 + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "Algebraic Programming User Documentation" + +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path +# into which the generated documentation will be written. If a relative path is +# entered, it will be relative to the location where doxygen was started. If +# left blank the current directory will be used. + +OUTPUT_DIRECTORY = docs/user + +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- +# directories (in 2 levels) under the output directory of each output format and +# will distribute the generated files over these directories. Enabling this +# option can be useful when feeding doxygen a huge amount of source files, where +# putting all generated files in the same directory would otherwise causes +# performance problems for the file system. +# The default value is: NO. + +CREATE_SUBDIRS = NO + +# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII +# characters to appear in the names of generated files. 
If set to NO, non-ASCII +# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode +# U+3044. +# The default value is: NO. + +ALLOW_UNICODE_NAMES = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, +# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), +# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, +# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, +# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, +# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, +# Ukrainian and Vietnamese. +# The default value is: English. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member +# descriptions after the members that are listed in the file and class +# documentation (similar to Javadoc). Set to NO to disable this. +# The default value is: YES. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief +# description of a member or function before the detailed description +# +# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. +# The default value is: YES. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator that is +# used to form the text in various listings. Each string in this list, if found +# as the leading text of the brief description, will be stripped from the text +# and the result, after processing the whole list, is used as the annotated +# text. 
Otherwise, the brief description is used as-is. If left blank, the +# following values are used ($name is automatically replaced with the name of +# the entity):The $name class, The $name widget, The $name file, is, provides, +# specifies, contains, represents, a, an and the. + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# doxygen will generate a detailed section even if there is only a brief +# description. +# The default value is: NO. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. +# The default value is: NO. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path +# before files name in the file list and in the header files. If set to NO the +# shortest path that makes the file name unique will be used +# The default value is: YES. + +FULL_PATH_NAMES = NO + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the +# path mentioned in the documentation of a class, which tells the reader which +# header file to include in order to use a class. 
If left blank only the name of +# the header file containing the class definition is used. Otherwise one should +# specify the list of include paths that are normally passed to the compiler +# using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but +# less readable) file names. This can be useful is your file systems doesn't +# support long names like on DOS, Mac, or CD-ROM. +# The default value is: NO. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the +# first line (until the first dot) of a Javadoc-style comment as the brief +# description. If set to NO, the Javadoc-style will behave just like regular Qt- +# style comments (thus requiring an explicit @brief command for a brief +# description.) +# The default value is: NO. + +JAVADOC_AUTOBRIEF = YES + +# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line +# such as +# /*************** +# as being the beginning of a Javadoc-style comment "banner". If set to NO, the +# Javadoc-style will behave just like regular comments and it will not be +# interpreted by doxygen. +# The default value is: NO. + +JAVADOC_BANNER = NO + +# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first +# line (until the first dot) of a Qt-style comment as the brief description. If +# set to NO, the Qt-style will behave just like regular Qt-style comments (thus +# requiring an explicit \brief command for a brief description.) +# The default value is: NO. + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a +# multi-line C++ special comment block (i.e. a block of //! or /// comments) as +# a brief description. This used to be the default behavior. The new default is +# to treat a multi-line C++ comment block as a detailed description. Set this +# tag to YES if you prefer the old behavior instead. 
+# +# Note that setting this tag to YES also means that rational rose comments are +# not recognized any more. +# The default value is: NO. + +MULTILINE_CPP_IS_BRIEF = NO + +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + +# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the +# documentation from any documented member that it re-implements. +# The default value is: YES. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. +# The default value is: NO. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen +# uses this value to replace tabs by spaces in code fragments. +# Minimum value: 1, maximum value: 16, default value: 4. + +TAB_SIZE = 4 + +# This tag can be used to specify a number of aliases that act as commands in +# the documentation. An alias has the form: +# name=value +# For example adding +# "sideeffect=@par Side Effects:^^" +# will allow you to put the command \sideeffect (or @sideeffect) in the +# documentation, which will result in a user-defined paragraph with heading +# "Side Effects:". Note that you cannot put \n's in the value part of an alias +# to insert newlines (in the resulting output). You can put ^^ in the value part +# of an alias to insert a newline as if a physical newline was in the original +# file. 
When you need a literal { or } or , in the value part of an alias you +# have to escape them by means of a backslash (\), this can lead to conflicts +# with the commands \{ and \} for these it is advised to use the version @{ and +# @} or use a double escape (\\{ and \\}) + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources +# only. Doxygen will then generate output that is more tailored for C. For +# instance, some of the names that are used will be different. The list of all +# members will be omitted, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_FOR_C = NO + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or +# Python sources only. Doxygen will then generate output that is more tailored +# for that language. For instance, namespaces will be presented as packages, +# qualified scopes will look different, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources. Doxygen will then generate output that is tailored for Fortran. +# The default value is: NO. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for VHDL. +# The default value is: NO. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice +# sources only. Doxygen will then generate output that is more tailored for that +# language. For instance, namespaces will be presented as modules, types will be +# separated into more groups, etc. +# The default value is: NO. + +OPTIMIZE_OUTPUT_SLICE = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given +# extension. Doxygen has a built-in mapping, but you can override or extend it +# using this tag. 
The format is ext=language, where ext is a file extension, and +# language is one of the parsers supported by doxygen: IDL, Java, JavaScript, +# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice, +# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: +# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser +# tries to guess whether the code is fixed or free formatted code, this is the +# default for Fortran type files). For instance to make doxygen treat .inc files +# as Fortran files (default is PHP), and .f files as C (default is Fortran), +# use: inc=Fortran f=C. +# +# Note: For files without extension you can use no_extension as a placeholder. +# +# Note that for custom extensions you also need to set FILE_PATTERNS otherwise +# the files are not read by doxygen. When specifying no_extension you should add +# * to the FILE_PATTERNS. +# +# Note see also the list of default file extension mappings. + +EXTENSION_MAPPING = + +# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments +# according to the Markdown format, which allows for more readable +# documentation. See https://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you can +# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in +# case of backward compatibilities issues. +# The default value is: YES. + +MARKDOWN_SUPPORT = YES + +# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up +# to that level are automatically included in the table of contents, even if +# they do not have an id attribute. +# Note: This feature currently applies only to Markdown headings. +# Minimum value: 0, maximum value: 99, default value: 5. +# This tag requires that the tag MARKDOWN_SUPPORT is set to YES. 
+ +TOC_INCLUDE_HEADINGS = 0 + +# When enabled doxygen tries to link words that correspond to documented +# classes, or namespaces to their corresponding documentation. Such a link can +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. +# The default value is: YES. + +AUTOLINK_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should set this +# tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); +# versus func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. +# The default value is: NO. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. +# The default value is: NO. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip (see: +# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen +# will parse them like normal C++ but will assume all classes use public instead +# of private inheritance when no explicit protection keyword is present. +# The default value is: NO. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate +# getter and setter methods for a property. Setting this option to YES will make +# doxygen to replace the get and set methods by a property in the documentation. +# This will only work if the methods are indeed getting or setting a simple +# type. If this is not the case, or you want to show the methods anyway, you +# should set this option to NO. +# The default value is: YES. 
+ +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. +# The default value is: NO. + +DISTRIBUTE_GROUP_DOC = NO + +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. + +GROUP_NESTED_COMPOUNDS = NO + +# Set the SUBGROUPING tag to YES to allow class member groups of the same type +# (for instance a group of public functions) to be put as a subgroup of that +# type (e.g. under the Public Functions section). Set it to NO to prevent +# subgrouping. Alternatively, this can be done per class using the +# \nosubgrouping command. +# The default value is: YES. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions +# are shown inside the group in which they are included (e.g. using \ingroup) +# instead of on a separate page (for HTML and Man pages) or section (for LaTeX +# and RTF). +# +# Note that this feature does not work in combination with +# SEPARATE_MEMBER_PAGES. +# The default value is: NO. + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions +# with only public data fields or simple typedef fields will be shown inline in +# the documentation of the scope in which they are defined (i.e. file, +# namespace, or group documentation), provided this scope is documented. If set +# to NO, structs, classes, and unions are shown on a separate page (for HTML and +# Man pages) or section (for LaTeX and RTF). +# The default value is: NO. 
+ +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or +# enum is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically be +# useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. +# The default value is: NO. + +TYPEDEF_HIDES_STRUCT = NO + +# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This +# cache is used to resolve symbols given their name and scope. Since this can be +# an expensive process and often the same symbol appears multiple times in the +# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small +# doxygen will become slower. If the cache is too large, memory is wasted. The +# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range +# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 +# symbols. At the end of a run doxygen will report the cache usage and suggest +# the optimal cache size from a speed point of view. +# Minimum value: 0, maximum value: 9, default value: 0. + +LOOKUP_CACHE_SIZE = 0 + +# The NUM_PROC_THREADS specifies the number threads doxygen is allowed to use +# during processing. When set to 0 doxygen will based this on the number of +# cores available in the system. You can set it explicitly to a value larger +# than 0 to get more control over the balance between CPU load and processing +# speed. At this moment only the input processing can be done using multiple +# threads. Since this is still an experimental feature the default is set to 1, +# which effectively disables parallel processing. Please report any issues you +# encounter. 
Generating dot graphs in parallel is controlled by the +# DOT_NUM_THREADS setting. +# Minimum value: 0, maximum value: 32, default value: 1. + +NUM_PROC_THREADS = 1 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in +# documentation are documented, even if no documentation was available. Private +# class members and static file members will be hidden unless the +# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. +# Note: This will also disable the warnings about undocumented members that are +# normally produced when WARNINGS is set to YES. +# The default value is: NO. + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will +# be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual +# methods of a class will be included in the documentation. +# The default value is: NO. + +EXTRACT_PRIV_VIRTUAL = NO + +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal +# scope will be included in the documentation. +# The default value is: NO. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be +# included in the documentation. +# The default value is: NO. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, +# only classes defined in header files are included. Does not have any effect +# for Java sources. +# The default value is: YES. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. 
If set to YES, local methods, +# which are defined in the implementation section but not in the interface are +# included in the documentation. If set to NO, only methods in the interface are +# included. +# The default value is: NO. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base name of +# the file that contains the anonymous namespace. By default anonymous namespace +# are hidden. +# The default value is: NO. + +EXTRACT_ANON_NSPACES = NO + +# If this flag is set to YES, the name of an unnamed parameter in a declaration +# will be determined by the corresponding definition. By default unnamed +# parameters remain unnamed in the output. +# The default value is: YES. + +RESOLVE_UNNAMED_PARAMS = YES + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all +# undocumented members inside documented classes or files. If set to NO these +# members will be included in the various overviews, but no documentation +# section is generated. This option has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_MEMBERS = YES + +# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. If set +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. +# The default value is: NO. + +HIDE_UNDOC_CLASSES = YES + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend +# declarations. If set to NO, these declarations will be included in the +# documentation. +# The default value is: NO. + +HIDE_FRIEND_COMPOUNDS = YES + +# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any +# documentation blocks found inside the body of a function. 
 If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. On
+# Windows (including Cygwin) and MacOS, users should typically set this option
+# to NO, whereas on Linux or other Unix flavors it should typically be set to
+# YES.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = YES
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+ +HIDE_COMPOUND_REFERENCE= NO + +# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class +# will show which file needs to be included to use the class. +# The default value is: YES. + +SHOW_HEADERFILE = YES + +# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of +# the files that are included by a file in the documentation of that file. +# The default value is: YES. + +SHOW_INCLUDE_FILES = NO + +# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each +# grouped member an include statement to the documentation, telling the reader +# which file to include in order to use the member. +# The default value is: NO. + +SHOW_GROUPED_MEMB_INC = NO + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include +# files with double quotes in the documentation rather than with sharp brackets. +# The default value is: NO. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the +# documentation for inline members. +# The default value is: YES. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the +# (detailed) documentation of file and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. +# The default value is: YES. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief +# descriptions of file, namespace and class members alphabetically by member +# name. If set to NO, the members will appear in declaration order. Note that +# this will also influence the order of the classes in the class list. +# The default value is: NO. + +SORT_BRIEF_DOCS = YES + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the +# (brief and detailed) documentation of class members so that constructors and +# destructors are listed first. 
If set to NO the constructors will appear in the +# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. +# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief +# member documentation. +# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting +# detailed member documentation. +# The default value is: NO. + +SORT_MEMBERS_CTORS_1ST = YES + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy +# of group names into alphabetical order. If set to NO the group names will +# appear in their defined order. +# The default value is: NO. + +SORT_GROUP_NAMES = YES + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by +# fully-qualified names, including namespaces. If set to NO, the class list will +# be sorted only by class name, not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the alphabetical +# list. +# The default value is: NO. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper +# type resolution of all parameters of a function it will reject a match between +# the prototype and the implementation of a member function even if there is +# only one candidate or it is obvious which candidate to choose by doing a +# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still +# accept a match between prototype and implementation in such cases. +# The default value is: NO. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. +# The default value is: YES. + +GENERATE_TODOLIST = NO + +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. 
This list is created by putting \test commands in the documentation. +# The default value is: YES. + +GENERATE_TESTLIST = NO + +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug +# list. This list is created by putting \bug commands in the documentation. +# The default value is: YES. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) +# the deprecated list. This list is created by putting \deprecated commands in +# the documentation. +# The default value is: YES. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional documentation +# sections, marked by \if ... \endif and \cond +# ... \endcond blocks. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the +# initial value of a variable or macro / define can have for it to appear in the +# documentation. If the initializer consists of more lines than specified here +# it will be hidden. Use a value of 0 to hide initializers completely. The +# appearance of the value of individual variables and macros / defines can be +# controlled using \showinitializer or \hideinitializer command in the +# documentation regardless of this setting. +# Minimum value: 0, maximum value: 10000, default value: 30. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. +# The default value is: YES. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This +# will remove the Files entry from the Quick Index and from the Folder Tree View +# (if specified). +# The default value is: YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces +# page. 
This will remove the Namespaces entry from the Quick Index and from the +# Folder Tree View (if specified). +# The default value is: YES. + +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command command input-file, where command is the value of the +# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided +# by doxygen. Whatever the program writes to standard output is used as the file +# version. For an example see the documentation. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. You can +# optionally specify a file name after the option, if omitted DoxygenLayout.xml +# will be used as the name of the layout file. See also section "Changing the +# layout of pages" for information. +# +# Note that if you run doxygen from a directory containing a file called +# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE +# tag is left empty. + +LAYOUT_FILE = + +# The CITE_BIB_FILES tag can be used to specify one or more bib files containing +# the reference definitions. This must be a list of .bib files. The .bib +# extension is automatically appended if omitted. This requires the bibtex tool +# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. +# For LaTeX the style of the bibliography can be controlled using +# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the +# search path. See also \cite for info how to create references. 
+ +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# Configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated to +# standard output by doxygen. If QUIET is set to YES this implies that the +# messages are off. +# The default value is: NO. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES +# this implies that the warnings are on. +# +# Tip: Turn warnings on while writing the documentation. +# The default value is: YES. + +WARNINGS = YES + +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate +# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag +# will automatically be disabled. +# The default value is: YES. + +WARN_IF_UNDOCUMENTED = YES + +# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as documenting some parameters in +# a documented function twice, or documenting parameters that don't exist or +# using markup commands wrongly. +# The default value is: YES. + +WARN_IF_DOC_ERROR = YES + +# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete +# function parameter documentation. If set to NO, doxygen will accept that some +# parameters have no documentation without warning. +# The default value is: YES. + +WARN_IF_INCOMPLETE_DOC = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that +# are documented, but have no documentation for their parameters or return +# value. If set to NO, doxygen will only warn about wrong parameter +# documentation, but not about the absence of documentation. 
If EXTRACT_ALL is +# set to YES then this flag will automatically be disabled. See also +# WARN_IF_INCOMPLETE_DOC +# The default value is: NO. + +WARN_NO_PARAMDOC = NO + +# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. +# The default value is: NO. + +WARN_AS_ERROR = NO + +# The WARN_FORMAT tag determines the format of the warning messages that doxygen +# can produce. The string should contain the $file, $line, and $text tags, which +# will be replaced by the file and line number from which the warning originated +# and the warning text. Optionally the format may contain $version, which will +# be replaced by the version of the file (if it could be obtained via +# FILE_VERSION_FILTER) +# The default value is: $file:$line: $text. + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning and error +# messages should be written. If left blank the output is written to standard +# error (stderr). In case the file specified cannot be opened for writing the +# warning and error messages are written to standard error. When as file - is +# specified the warning and error messages are written to standard output +# (stdout). + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. 
See also FILE_PATTERNS and EXTENSION_MAPPING +# Note: If this tag is empty the current directory is searched. + +INPUT = include/graphblas.hpp \ + include/graphblas/base \ + include/graphblas/algorithms \ + include/graphblas/interfaces \ + include/transition \ + include/graphblas/descriptors.hpp \ + include/graphblas/semiring.hpp \ + include/graphblas/monoid.hpp \ + include/graphblas/iomode.hpp \ + include/graphblas/ops.hpp \ + include/graphblas/descriptors.hpp \ + include/graphblas/rc.hpp \ + include/graphblas/reference/config.hpp \ + include/graphblas/nonblocking/config.hpp \ + include/graphblas/bsp1d/config.hpp \ + include/graphblas/identities.hpp \ + include/graphblas/phase.hpp \ + include/graphblas/type_traits.hpp \ + include/graphblas/backends.hpp \ + include/graphblas/blas0.hpp #\ +# include/graphblas/utils \ +# include/graphblas/utils.hpp + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses +# libiconv (or the iconv built into libc) for the transcoding. See the libiconv +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. +# The default value is: UTF-8. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. 
+# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.l, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, +# *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C +# comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, +# *.vhdl, *.ucf, *.qsf and *.ice. + +FILE_PATTERNS = *.hpp \ + *.cpp \ + *.h \ + *.c + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = include/graphblas/base/alloc.hpp \ + include/graphblas/base/coordinates.hpp \ + include/graphblas/base/distribution.hpp \ + include/graphblas/base/internalops.hpp \ + include/graphblas/algorithms/hpcg #\ +# include/graphblas/base/init.hpp + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. +# The default value is: NO. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. +# +# Note that the wildcards are matched against the file with absolute path, so to +# exclude all test directories for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. 
The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# ANamespace::AClass, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS = internal
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH = examples/sp.cpp \
+ include/graphblas/ops.hpp \
+ include/graphblas/internalops.hpp
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated.
If lines are added +# or removed, the anchors will not be placed correctly. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: pattern=filter +# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how +# filters are used. If the FILTER_PATTERNS tag is empty or if none of the +# patterns match the file name, INPUT_FILTER is applied. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# properly processed by doxygen. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will also be used to filter the input files that are used for +# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). +# The default value is: NO. + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and +# it is also possible to disable source filtering for a specific pattern using +# *.ext= (so without naming a filter). +# This tag requires that the tag FILTER_SOURCE_FILES is set to YES. + +FILTER_SOURCE_PATTERNS = + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. 
+ +USE_MDFILE_AS_MAINPAGE = + +#--------------------------------------------------------------------------- +# Configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will be +# generated. Documented entities will be cross-referenced with these sources. +# +# Note: To get rid of all source code in the generated output, make sure that +# also VERBATIM_HEADERS is set to NO. +# The default value is: NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body of functions, +# classes and enums directly into the documentation. +# The default value is: NO. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any +# special comment blocks from generated source code fragments. Normal C, C++ and +# Fortran comments will always remain visible. +# The default value is: YES. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES then for each documented +# entity all documented functions referencing it will be listed. +# The default value is: NO. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES then for each documented function +# all documented entities called/used by that function will be listed. +# The default value is: NO. + +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set +# to YES then the hyperlinks from functions in REFERENCES_RELATION and +# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will +# link to the documentation. +# The default value is: YES. 
+ +REFERENCES_LINK_SOURCE = YES + +# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the +# source code will show a tooltip with additional information such as prototype, +# brief description and links to the definition and documentation. Since this +# will make the HTML file larger and loading of large files a bit slower, you +# can opt to disable this feature. +# The default value is: YES. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +SOURCE_TOOLTIPS = YES + +# If the USE_HTAGS tag is set to YES then the references to source code will +# point to the HTML generated by the htags(1) tool instead of doxygen built-in +# source browser. The htags tool is part of GNU's global source tagging system +# (see https://www.gnu.org/software/global/global.html). You will need version +# 4.8.6 or higher. +# +# To use it do the following: +# - Install the latest version of global +# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file +# - Make sure the INPUT points to the root of the source tree +# - Run doxygen as normal +# +# Doxygen will invoke htags (and that will in turn invoke gtags), so these +# tools must be available from the command line (i.e. in the search path). +# +# The result: instead of the source browser generated by doxygen, the links to +# source code will now point to the output of htags. +# The default value is: NO. +# This tag requires that the tag SOURCE_BROWSER is set to YES. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a +# verbatim copy of the header file for each class for which an include is +# specified. Set to NO to disable this. +# See also: Section \class. +# The default value is: YES. 
+ +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all +# compounds will be generated. Enable this if the project contains a lot of +# classes, structs, unions or interfaces. +# The default value is: YES. + +ALPHABETICAL_INDEX = YES + +# In case all classes in a project start with a common prefix, all classes will +# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag +# can be used to specify a prefix (or a list of prefixes) that should be ignored +# while generating the index headers. +# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output +# The default value is: YES. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a +# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of +# it. +# The default directory is: html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each +# generated HTML page (for example: .htm, .php, .asp). +# The default value is: .html. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a user-defined HTML header file for +# each generated HTML page. If the tag is left blank doxygen will generate a +# standard header. 
+# +# To get valid HTML the header file that includes any scripts and style sheets +# that doxygen needs, which is dependent on the configuration options used (e.g. +# the setting GENERATE_TREEVIEW). It is highly recommended to start with a +# default header using +# doxygen -w html new_header.html new_footer.html new_stylesheet.css +# YourConfigFile +# and then modify the file new_header.html. See also section "Doxygen usage" +# for information on how to generate the default header that doxygen normally +# uses. +# Note: The header is subject to change so you typically have to regenerate the +# default header when upgrading to a newer version of doxygen. For a description +# of the possible markers and block names see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each +# generated HTML page. If the tag is left blank doxygen will generate a standard +# footer. See HTML_HEADER for more information on how to generate a default +# footer and what special commands can be used inside the footer. See also +# section "Doxygen usage" for information on how to generate the default footer +# that doxygen normally uses. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style +# sheet that is used by each HTML page. It can be used to fine-tune the look of +# the HTML output. If left blank doxygen will generate a default style sheet. +# See also section "Doxygen usage" for information on how to generate the style +# sheet that doxygen normally uses. +# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as +# it is more robust and this tag (HTML_STYLESHEET) will in the future become +# obsolete. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_STYLESHEET = + +# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# cascading style sheets that are included after the standard style sheets +# created by doxygen. Using this option one can overrule certain style aspects. +# This is preferred over using HTML_STYLESHEET since it does not replace the +# standard style sheet and is therefore more robust against future updates. +# Doxygen will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). For an example see the documentation. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that the +# files will be copied as-is; there are no commands or markers available. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen +# will adjust the colors in the style sheet and background images according to +# this color. Hue is specified as an angle on a color-wheel, see +# https://en.wikipedia.org/wiki/Hue for more information. For instance the value +# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 +# purple, and 360 is red again. +# Minimum value: 0, maximum value: 359, default value: 220. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors +# in the HTML output. For a value of 0 the output will use gray-scales only. A +# value of 255 will produce the most vivid colors. +# Minimum value: 0, maximum value: 255, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the +# luminance component of the colors in the HTML output. Values below 100 +# gradually make the output lighter, whereas values above 100 make the output +# darker. The value divided by 100 is the actual gamma applied, so 80 represents +# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not +# change the gamma. +# Minimum value: 40, maximum value: 240, default value: 80. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting this +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_TIMESTAMP = YES + +# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML +# documentation will contain a main index with vertical navigation menus that +# are dynamically created via JavaScript. If disabled, the navigation index will +# consists of multiple levels of tabs that are statically embedded in every HTML +# page. Disable this option to support browsers that do not have JavaScript, +# like the Qt help browser. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +HTML_DYNAMIC_MENUS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_DYNAMIC_SECTIONS = NO + +# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries +# shown in the various tree structured indices initially; the user can expand +# and collapse entries dynamically later on. Doxygen will expand the tree to +# such a level that at most the specified number of entries are visible (unless +# a fully collapsed tree already exceeds this amount). So setting the number of +# entries 1 will produce a full collapsed tree by default. 0 is a special value +# representing an infinite number of entries and will result in a full expanded +# tree by default. +# Minimum value: 0, maximum value: 9999, default value: 100. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_INDEX_NUM_ENTRIES = 100 + +# If the GENERATE_DOCSET tag is set to YES, additional index files will be +# generated that can be used as input for Apple's Xcode 3 integrated development +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at +# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy +# genXcode/_index.html for more information. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_DOCSET = NO + +# This tag determines the name of the docset feed. 
A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# The default value is: Doxygen generated docs. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# This tag determines the URL of the docset feed. A documentation feed provides +# an umbrella under which multiple documentation sets from a single provider +# (such as a company or product suite) can be grouped. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_FEEDURL = + +# This tag specifies a string that should uniquely identify the documentation +# set bundle. This should be a reverse domain-name style string, e.g. +# com.mycompany.MyDocSet. Doxygen will append .docset to the name. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. +# The default value is: org.doxygen.Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. +# The default value is: Publisher. +# This tag requires that the tag GENERATE_DOCSET is set to YES. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three +# additional HTML index files: index.hhp, index.hhc, and index.hhk. The +# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop +# on Windows. In the beginning of 2021 Microsoft took the original page, with +# a.o. 
the download links, offline (the HTML help workshop was already many years
+# in maintenance mode). You can download the HTML help workshop from the web
+# archives at Installation executable (see:
+# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
+# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the main .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +CHM_INDEX_ENCODING = + +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it +# enables the Previous and Next buttons. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members to +# the table of contents of the HTML help documentation and to the tree view. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTMLHELP is set to YES. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that +# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help +# (.qch) of the generated HTML documentation. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify +# the file name of the resulting .qch file. The path specified is relative to +# the HTML output folder. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help +# Project output. For more information please see Qt Help Project / Namespace +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt +# Help Project output. For more information please see Qt Help Project / Virtual +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). 
+# The default value is: doc. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_VIRTUAL_FOLDER = doc + +# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom +# filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see Qt Help Project / Custom +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's filter section matches. Qt Help Project / Filter Attributes (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHP_SECT_FILTER_ATTRS = + +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. +# This tag requires that the tag GENERATE_QHP is set to YES. + +QHG_LOCATION = + +# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be +# generated, together with the HTML files, they form an Eclipse help plugin. To +# install this plugin and make it available under the help contents menu in +# Eclipse, the contents of the directory containing the HTML and XML files needs +# to be copied into the plugins directory of eclipse. The name of the directory +# within the plugins directory should be the same as the ECLIPSE_DOC_ID value. +# After copying Eclipse needs to be restarted before the help appears. +# The default value is: NO. 
+# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_ECLIPSEHELP = NO + +# A unique identifier for the Eclipse help plugin. When installing the plugin +# the directory name containing the HTML and XML files should also have this +# name. Each documentation set should have its own identifier. +# The default value is: org.doxygen.Project. +# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. + +ECLIPSE_DOC_ID = org.doxygen.Project + +# If you want full control over the layout of the generated HTML pages it might +# be necessary to disable the index and replace it with your own. The +# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top +# of each HTML page. A value of NO enables the index and the value YES disables +# it. Since the tabs in the index contain the same information as the navigation +# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +DISABLE_INDEX = NO + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. If the tag +# value is set to YES, a side panel will be generated containing a tree-like +# index structure (just like the one that is generated for HTML Help). For this +# to work a browser that supports JavaScript, DHTML, CSS and frames is required +# (i.e. any modern browser). Windows users are probably better off using the +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can +# further fine tune the look of the index (see "Fine-tuning the output"). As an +# example, the default style sheet generated by doxygen has an example that +# shows how to put an image at the root of the tree instead of the PROJECT_NAME. 
+# Since the tree basically has the same information as the tab index, you could +# consider setting DISABLE_INDEX to YES when enabling this option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +GENERATE_TREEVIEW = NO + +# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the +# FULL_SIDEBAR option determines if the side bar is limited to only the treeview +# area (value NO) or if it should extend to the full height of the window (value +# YES). Setting this to YES gives a layout similar to +# https://docs.readthedocs.io with more room for contents, but less room for the +# project logo, title, and description. If either GENERATE_TREEVIEW or +# DISABLE_INDEX is set to NO, this option has no effect. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FULL_SIDEBAR = NO + +# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that +# doxygen will group on one line in the generated HTML documentation. +# +# Note that a value of 0 will completely suppress the enum values from appearing +# in the overview section. +# Minimum value: 0, maximum value: 20, default value: 4. +# This tag requires that the tag GENERATE_HTML is set to YES. + +ENUM_VALUES_PER_LINE = 4 + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used +# to set the initial width (in pixels) of the frame in which the tree is shown. +# Minimum value: 0, maximum value: 1500, default value: 250. +# This tag requires that the tag GENERATE_HTML is set to YES. + +TREEVIEW_WIDTH = 250 + +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to +# external symbols imported via tag files in a separate window. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. + +EXT_LINKS_IN_WINDOW = NO + +# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email +# addresses. 
+# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +OBFUSCATE_EMAILS = YES + +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + +# Use this tag to change the font size of LaTeX formulas included as images in +# the HTML documentation. When you change the font size after a successful +# doxygen run you need to manually remove any form_*.png images from the HTML +# output directory to force them to be regenerated. +# Minimum value: 8, maximum value: 50, default value: 10. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANSPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are not +# supported properly for IE 6.0, but are supported on all modern browsers. +# +# Note that when changing this option you need to delete any form_*.png files in +# the HTML output directory before the changes have effect. +# The default value is: YES. +# This tag requires that the tag GENERATE_HTML is set to YES. + +FORMULA_TRANSPARENT = YES + +# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands +# to create new LaTeX commands to be used in formulas as building blocks. See +# the section "Including formulas" for details. 
+
+FORMULA_MACROFILE =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = YES
+
+# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
+# Note that the different versions of MathJax have different requirements with
+# regards to the different settings, so it is possible that also other MathJax
+# settings have to be changed when switching between the different MathJax
+# versions.
+# Possible values are: MathJax_2 and MathJax_3.
+# The default value is: MathJax_2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_VERSION = MathJax_2
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. For more details about the output format see MathJax
+# version 2 (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
+# (see:
+# http://docs.mathjax.org/en/latest/web/components/output.html).
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility. This is the name for MathJax version 2, for MathJax version 3
+# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
+# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
+# is the name for MathJax version 3, for MathJax version 2 this will be
+# translated into HTML-CSS) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment. The default value is:
+# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2
+# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+# NOTE: the former default (http://cdn.mathjax.org) was shut down in April 2017;
+# use the jsDelivr CDN matching MATHJAX_VERSION = MathJax_2 above.
+MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@2
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# for MathJax version 2 (see
+# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions):
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# For example for MathJax version 3 (see
+# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html):
+# MATHJAX_EXTENSIONS = ams
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output.
The underlying search engine uses javascript and DHTML and +# should work on any modern browser. Note that when using HTML help +# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) +# there is already a search function so this one should typically be disabled. +# For large projects the javascript based search engine can be slow, then +# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to +# search using the keyboard; to jump to the search box use + S +# (what the is depends on the OS and browser, but it is typically +# , /