diff --git a/.github/actions/create-upload-suggestions/action.yml b/.github/actions/create-upload-suggestions/action.yml index ec9af1e29e8..0696e06589b 100644 --- a/.github/actions/create-upload-suggestions/action.yml +++ b/.github/actions/create-upload-suggestions/action.yml @@ -225,7 +225,7 @@ runs: steps.upload-changes.outputs.artifact-url }}) - name: Fail action if some files were changed if: >- - ${{ (steps.files_changed.outputs.files_changed == 'true') && + ${{ (steps.files_changed.outputs.files_changed == 'true') && (steps.inputs.outputs.fail-if-changed == 'true') }} shell: bash run: | diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml index b034d72b0fc..18dceb1a6b1 100644 --- a/.github/workflows/coverity.yml +++ b/.github/workflows/coverity.yml @@ -1,10 +1,10 @@ name: Coverity Scan on: workflow_dispatch: # run whenever a contributor calls it - schedule: + schedule: - cron: '48 5 * * *' # Run at 05:48 # Coverity will let GRASS do a scan a maximum of twice per day, so this schedule will help GRASS fit within that limit with some additional space for manual runs - + jobs: build: runs-on: [ ubuntu-latest ] diff --git a/.github/workflows/post-pr-reviews.yml b/.github/workflows/post-pr-reviews.yml index fa61cd88b6c..b8f5af32b43 100644 --- a/.github/workflows/post-pr-reviews.yml +++ b/.github/workflows/post-pr-reviews.yml @@ -3,7 +3,7 @@ name: Post PR code suggestions on: workflow_run: - workflows: ["ClangFormat Check"] + workflows: ["ClangFormat Check", "Python Code Quality"] types: - completed permissions: {} @@ -27,7 +27,48 @@ jobs: github-token: ${{ github.token }} run-id: ${{github.event.workflow_run.id }} - uses: reviewdog/action-setup@3f401fe1d58fe77e10d665ab713057375e39b887 # v1.3.0 - - run: | + - name: Check what tools have suggestions to post + # Using this pattern to have expected file names explicitly named + id: tools + run: | + for tool_name in $INPUT_TOOL_NAMES + do + INPUT_TOOL_NAME_FILE="diff-${tool_name}.patch" + echo "Checking if tool ${tool_name} left suggestions in ${INPUT_TOOL_NAME_FILE}..." 
+ if [[ -f "${INPUT_TOOL_NAME_FILE}" ]]; then + echo " ${INPUT_TOOL_NAME_FILE} was found for tool ${tool_name}" + echo "$tool_name=true" >> "${GITHUB_OUTPUT}" + else + echo " ${INPUT_TOOL_NAME_FILE} was not found for tool ${tool_name}" + echo "$tool_name=false" >> "${GITHUB_OUTPUT}" + fi + done + env: + INPUT_TOOL_NAMES: >- + black + clang-format + - name: Post Black suggestions + if: ${{ steps.tools.outputs.black == 'true' }} + run: | + TMPFILE="diff-${INPUT_TOOL_NAME}.patch" + GITHUB_ACTIONS="" reviewdog \ + -name="${INPUT_TOOL_NAME:-reviewdog-suggester}" \ + -f=diff \ + -f.diff.strip=1 \ + -filter-mode=nofilter \ + -guess \ + -reporter="github-pr-review" < "${TMPFILE}" + env: + INPUT_TOOL_NAME: black + REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CI_COMMIT: ${{ github.event.workflow_run.head_sha }} + CI_REPO_OWNER: ${{ github.event.workflow_run.repository.owner.login }} + CI_REPO_NAME: ${{ github.event.workflow_run.repository.name }} + # CI_PULL_REQUEST: "" # Populated from reviewdog's "-guess" flag since hard to get + - name: Post ClangFormat suggestions + if: ${{ steps.tools.outputs.clang-format == 'true' }} + run: | + TMPFILE="diff-${INPUT_TOOL_NAME}.patch" GITHUB_ACTIONS="" reviewdog \ -name="${INPUT_TOOL_NAME:-reviewdog-suggester}" \ -f=diff \ @@ -36,7 +77,6 @@ jobs: -guess \ -reporter="github-pr-review" < "${TMPFILE}" env: - TMPFILE: diff-clang-format.patch INPUT_TOOL_NAME: clang-format REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} CI_COMMIT: ${{ github.event.workflow_run.head_sha }} diff --git a/.github/workflows/python-code-quality.yml b/.github/workflows/python-code-quality.yml index ade29d1114f..438a21421f0 100644 --- a/.github/workflows/python-code-quality.yml +++ b/.github/workflows/python-code-quality.yml @@ -56,6 +56,23 @@ jobs: with: python-version: ${{ matrix.python-version }} cache: pip + - name: Upgrade pip + run: python -m pip install --upgrade pip + + - name: Install Black only + run: pip install black==${{ matrix.black-version }} + + - name: Run Black + run: black . + + - name: Create and uploads code suggestions to apply for Black + # Will fail fast here if there are changes required + id: diff-black + uses: ./.github/actions/create-upload-suggestions + with: + tool-name: black + # To keep repo's file structure in formatted changes artifact + extra-upload-changes: .clang-format - name: Install non-Python dependencies run: | @@ -66,18 +83,12 @@ jobs: - name: Install Python dependencies run: | - python -m pip install --upgrade pip pip install -r .github/workflows/python_requirements.txt pip install -r .github/workflows/optional_requirements.txt - pip install black==${{ matrix.black-version }} pip install flake8==${{ matrix.flake8-version }} pip install pylint==${{ matrix.pylint-version }} pytest-github-actions-annotate-failures pip install bandit[sarif]==${{matrix.bandit-version}} - - name: Run Black - run: | - black --check --diff . - - name: Run Flake8 run: | flake8 --count --statistics --show-source --jobs=$(nproc) . 
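
The "Check what tools have suggestions to post" step above reduces to probing for per-tool patch files named diff-<tool>.patch and emitting one boolean step output per tool. A minimal Python sketch of that detection logic, assuming the file-naming convention and the GITHUB_OUTPUT mechanism used by the workflow (the fallback file name is illustrative):

    import os
    from pathlib import Path

    # Tool names mirror INPUT_TOOL_NAMES in the workflow step above.
    TOOL_NAMES = ["black", "clang-format"]

    def detect_suggestions(tool_names, output_path):
        """Append '<tool>=true|false' lines based on diff-<tool>.patch presence."""
        with open(output_path, "a") as out:
            for tool in tool_names:
                patch = Path(f"diff-{tool}.patch")
                print(f"Checking if tool {tool} left suggestions in {patch}...")
                out.write(f"{tool}={'true' if patch.is_file() else 'false'}\n")

    if __name__ == "__main__":
        # GITHUB_OUTPUT is set by GitHub Actions; fall back to a local file.
        detect_suggestions(TOOL_NAMES, os.environ.get("GITHUB_OUTPUT", "outputs.txt"))
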
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 4a55fdee7c6..d961b034627 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -96,7 +96,7 @@ jobs: - name: Add extra exclusions to a gunittest config file run: | - sed 's:exclude =:exclude = ${{ + sed 's:exclude =:exclude = ${{ steps.get-exclude.outputs.extra-exclude }}:g' .gunittest.cfg > .gunittest.extra.cfg cat .gunittest.extra.cfg diff --git a/.gitignore b/.gitignore index 9a44ace144b..2cc7f3da721 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ config.log config.status* error.log grass.pc +autom4te.cache/ # ignore specific file generated during make gui/wxpython/menustrings.py @@ -50,3 +51,10 @@ test_keyvalue_result.txt # ignore paths generated by helper tools node_modules include/VERSION_GIT + +# ignore autogenerated documentation +python/grass/docs/_build/ +html/ +latex/ +lib/*/html/ +lib/*/latex/ diff --git a/docker/alpine/Dockerfile b/docker/alpine/Dockerfile index 76b7668ae1c..fa05e3215cf 100644 --- a/docker/alpine/Dockerfile +++ b/docker/alpine/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3-alpine3.19@sha256:c7eb5c92b7933fe52f224a91a1ced27b91840ac9c69c58bef40d602156bcdb41 as common +FROM alpine:3.19@sha256:c5b1261d6d3e43071626931fc004f70149baeba2c8ec672bd4f27761f8e1ad6b as common # Based on: # https://github.com/mundialis/docker-grass-gis/blob/master/Dockerfile @@ -56,6 +56,7 @@ ENV GRASS_RUN_PACKAGES="\ openblas \ py3-numpy \ py3-pillow \ + python3 \ pdal \ pdal-dev \ postgresql15-client \ @@ -73,18 +74,8 @@ ENV GRASS_RUN_PACKAGES="\ WORKDIR /src -ENV PYTHONBIN=python$PYTHON_VERSION - -RUN echo "Install Python";\ - apk add --no-cache $PYTHONBIN && \ - $PYTHONBIN -m ensurepip && \ - rm -r /usr/lib/python*/ensurepip && \ - pip$PYTHON_VERSION install --no-cache-dir --upgrade pip setuptools && \ - if [ ! -e /usr/bin/pip ]; then ln -s pip$PYTHON_VERSION /usr/bin/pip ; fi && \ - if [ ! -e /usr/bin/python ]; then ln -sf /usr/bin/$PYTHONBIN /usr/bin/python; fi && \ - rm -r /root/.cache; \ - # Add the packages - echo "Install main packages";\ +# Add the packages +RUN echo "Install main packages";\ apk update; \ apk add --no-cache $GRASS_RUN_PACKAGES @@ -215,15 +206,14 @@ COPY --from=build /usr/local/grass* /usr/local/grass/ COPY --from=build /usr/lib/gdalplugins/*_GRASS.so /usr/lib/gdalplugins/ # run simple LAZ test COPY docker/testdata/simple.laz /tmp/ -COPY docker/testdata/test_grass_session.py docker/alpine/grass_tests.sh /scripts/ -COPY docker/testdata/test_grass_session.py /scripts/ +COPY docker/testdata/test_grass_python.py docker/alpine/grass_tests.sh /scripts/ +COPY docker/testdata/test_grass_python.py /scripts/ # install external Python API -RUN pip3 install --no-cache-dir --upgrade pip six grass-session --ignore-installed six; \ - ln -sf /usr/local/grass $(grass --config path); \ +RUN ln -sf /usr/local/grass $(grass --config path); \ # run some tests and cleanup $SHELL /scripts/grass_tests.sh \ - && rm -f /scripts/grass_tests.sh /tmp/simple.laz /scripts/test_grass_session.py; \ + && rm -f /scripts/grass_tests.sh /tmp/simple.laz /scripts/test_grass_python.py; \ # delete unused packages apk del --no-cache gettext pdal-dev; \ # show installed version diff --git a/imagery/i.pca/testsuite/test_pca.py b/imagery/i.pca/testsuite/test_pca.py new file mode 100644 index 00000000000..9da7991f876 --- /dev/null +++ b/imagery/i.pca/testsuite/test_pca.py @@ -0,0 +1,174 @@ +""" +Name: i.pca test +Purpose: Tests i.pca. 
+ +Author: Hamed Elgizery - hamedashraf2004@gmail.com +Copyright: (C) 2024 by Hamed Elgizery and the GRASS Development Team +Licence: This program is free software under the GNU General Public + License (>=v2). Read the file COPYING that comes with GRASS + for details. +""" +from grass.gunittest.case import TestCase + + +class TestReport(TestCase): + @classmethod + def setUpClass(cls): + """Use temporary region settings""" + cls.runModule("g.region", raster="lsat7_2002_10@PERMANENT") + cls.use_temp_region() + + @classmethod + def tearDownClass(cls): + cls.runModule( + "g.remove", + flags="f", + type="raster", + name="lsat7_2002_pca.1,lsat7_2002_pca.2,lsat7_2002_pca.3,lsat7_2002_pca.4,lsat7_2002_pca.6", + ) + cls.del_temp_region() + + def test_pca_sample(self): + """Testing pca sample""" + self.assertModule( + "i.pca", + input="lsat7_2002_10,lsat7_2002_20,lsat7_2002_30,lsat7_2002_40,lsat7_2002_50,lsat7_2002_70", + output="lsat7_2002_pca", + ) + + lsat7_2002_pca_info_out = """north=228513 + south=214975.5 + east=645012 + west=629992.5 + nsres=28.5 + ewres=28.5 + rows=475 + cols=527 + cells=250325 + datatype=CELL + ncats=0 + comments=\"Eigen values, (vectors), and [percent importance]:PC1 4334.35 ( 0.2824, 0.3342, 0.5092,-0.0087, 0.5264, 0.5217) [83.04%]PC2 588.31 ( 0.2541, 0.1885, 0.2923,-0.7428,-0.5110,-0.0403) [11.27%]PC3 239.22 ( 0.3801, 0.3819, 0.2681, 0.6238,-0.4000,-0.2980) [ 4.58%]PC4 32.85 ( 0.1752,-0.0191,-0.4053, 0.1593,-0.4435, 0.7632) [ 0.63%]PC5 20.73 (-0.6170,-0.2514, 0.6059, 0.1734,-0.3235, 0.2330) [ 0.40%]PC6 4.08 (-0.5475, 0.8021,-0.2282,-0.0607,-0.0208, 0.0252) [ 0.08%]i.pca input=\"lsat7_2002_10,lsat7_2002_20,lsat7_2002_30,lsat7_2002_40\,lsat7_2002_50,lsat7_2002_70\" output=\"lsat7_2002_pca\" rescale=0,255 \percent=99" """ + + lsat7_2002_pca_univar_out = [ + """n=250325 + null_cells=0 + cells=250325 + min=0 + max=255 + range=255 + mean=60.6958074503146 + mean_of_abs=60.6958074503146 + stddev=32.8850846003739 + variance=1081.42878917375 + coeff_var=54.1801583697417 + sum=15193678 + first_quartile=36 + median=51 + third_quartile=77 + percentile_90=101""", + """n=250325 + null_cells=0 + cells=250325 + min=0 + max=255 + range=255 + mean=106.099418755618 + mean_of_abs=106.099418755618 + stddev=26.4487056926998 + variance=699.534032819051 + coeff_var=24.928228639612 + sum=26559337 + first_quartile=88 + median=104 + third_quartile=121 + percentile_90=137""", + """n=250325 + null_cells=0 + cells=250325 + min=0 + max=255 + range=255 + mean=74.1768980325577 + mean_of_abs=74.1768980325577 + stddev=14.1956266450161 + variance=201.515815844691 + coeff_var=19.1375307158104 + sum=18568332 + first_quartile=67 + median=74 + third_quartile=81 + percentile_90=88""", + """n=250325 + null_cells=0 + cells=250325 + min=0 + max=255 + range=255 + mean=113.285145311096 + mean_of_abs=113.285145311096 + stddev=10.689092045444 + variance=114.256688755974 + coeff_var=9.43556369733241 + sum=28358104 + first_quartile=109 + median=114 + third_quartile=118 + percentile_90=122""", + """n=250325 + null_cells=0 + cells=250325 + min=0 + max=255 + range=255 + mean=110.346713272745 + mean_of_abs=110.346713272745 + stddev=8.43087149474902 + variance=71.0795941609716 + coeff_var=7.64034672596938 + sum=27622541 + first_quartile=106 + median=110 + third_quartile=114 + percentile_90=118""", + """n=250325 + null_cells=0 + cells=250325 + min=0 + max=255 + range=255 + mean=115.238465994208 + mean_of_abs=115.238465994208 + stddev=8.97064489504434 + variance=80.4724698329851 + coeff_var=7.78441887233665 + sum=28847069 + 
first_quartile=110 + median=115 + third_quartile=121 + percentile_90=126""", + ] + + for i in range(1, 7): + # Asserting the results givien from r.info + self.assertRasterFitsInfo( + raster=f"lsat7_2002_pca.{i}", + reference=lsat7_2002_pca_info_out, + precision=3, + ) + + # Asserting the results givien from r.univar + univar_out = lsat7_2002_pca_univar_out[i - 1] + self.assertModuleKeyValue( + "r.univar", + flags="eg", + map=f"lsat7_2002_pca.{i}", + reference=univar_out, + precision=3, + sep="=", + ) + + +if __name__ == "__main__": + from grass.gunittest.main import test + + test() diff --git a/locale/po/grasswxpy_it.po b/locale/po/grasswxpy_it.po index c6fe9e3a02d..c1851867868 100644 --- a/locale/po/grasswxpy_it.po +++ b/locale/po/grasswxpy_it.po @@ -9,8 +9,8 @@ msgstr "" "Project-Id-Version: grasswxpy_it\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2024-02-09 06:51-0700\n" -"PO-Revision-Date: 2023-04-04 21:08+0000\n" -"Last-Translator: Markus \n" +"PO-Revision-Date: 2024-03-28 22:45+0000\n" +"Last-Translator: luca \n" "Language-Team: Italian \n" "Language: it\n" @@ -18,7 +18,7 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=n != 1;\n" -"X-Generator: Weblate 4.16.4\n" +"X-Generator: Weblate 5.3\n" #: ../gui/wxpython/nviz/tools.py:109 ../gui/wxpython/nviz/preferences.py:65 #: ../gui/wxpython/nviz/preferences.py:69 @@ -712,7 +712,7 @@ msgstr "Vuoi registrare una nuova animazione senza salvare la precedente?" #: ../gui/wxpython/nviz/tools.py:2591 msgid "Animation already exists" -msgstr "" +msgstr "L'animazione esiste già" #: ../gui/wxpython/nviz/tools.py:2757 msgid "No file prefix given." @@ -2369,9 +2369,8 @@ msgid "Re&load maps" msgstr "" #: ../gui/wxpython/datacatalog/tree.py:2178 -#, fuzzy msgid "&Copy path to mapset" -msgstr "&Crea mapset" +msgstr "&Copia percorso al mapset" #: ../gui/wxpython/datacatalog/tree.py:2189 msgid "&Create mapset" @@ -2410,9 +2409,8 @@ msgid "&Delete mapsets" msgstr "&Elimina mapsets" #: ../gui/wxpython/datacatalog/tree.py:2282 -#, fuzzy msgid "&Copy paths to mapsets" -msgstr "&Crea mapset" +msgstr "&Copia percorsi ai mapsets" #: ../gui/wxpython/datacatalog/tree.py:2292 msgid "No available options" @@ -2689,9 +2687,8 @@ msgid "Double-click to open the tool" msgstr "Doppio click o Ctrl-Enter per eseguire il modulo selezionato" #: ../gui/wxpython/history/tree.py:120 -#, fuzzy msgid "&Remove" -msgstr "Rimuovi" +msgstr "&Rimuovi" #: ../gui/wxpython/history/tree.py:179 msgid "Cannot be parsed into command" @@ -2714,7 +2711,7 @@ msgstr "Spostando <{name}>" #: ../gui/wxpython/history/tree.py:203 msgid "<{}> removed" -msgstr "" +msgstr "<{}> rimosso" #: ../gui/wxpython/history/browser.py:46 #, fuzzy @@ -2722,9 +2719,8 @@ msgid "History browser" msgstr "Naviga nei dati" #: ../gui/wxpython/history/browser.py:68 -#, fuzzy msgid "&Export history" -msgstr "Esporta a:" +msgstr "&Esporta storia dei comandi" #: ../gui/wxpython/history/browser.py:70 #, fuzzy @@ -3872,7 +3868,7 @@ msgstr "Successivo: %(forw)s, Precedente: %(back)s" #: ../gui/wxpython/image2target/ii2t_toolbars.py:40 #: ../gui/wxpython/photo2image/ip2i_toolbars.py:40 msgid "Add new GCP to the list" -msgstr "" +msgstr "Aggiungi nuovo GCP alla lista" #: ../gui/wxpython/gcp/toolbars.py:41 #: ../gui/wxpython/image2target/ii2t_toolbars.py:41 @@ -3915,7 +3911,7 @@ msgstr "" #: ../gui/wxpython/image2target/ii2t_toolbars.py:117 #: ../gui/wxpython/photo2image/ip2i_toolbars.py:117 msgid " / Zoom to map" -msgstr "" +msgstr " /Zoom alla 
mappa" #: ../gui/wxpython/gcp/toolbars.py:134 #: ../gui/wxpython/image2target/ii2t_toolbars.py:134 @@ -4545,11 +4541,11 @@ msgstr "Esporta modello in un script Python" #: ../gui/wxpython/gmodeler/toolbars.py:52 msgid "Add GRASS tool (module) to model" -msgstr "" +msgstr "Aggiungi uno strumento (modulo) di GRASS al modello" #: ../gui/wxpython/gmodeler/toolbars.py:54 msgid "Add data to model" -msgstr "" +msgstr "Aggiungi dati al modello" #: ../gui/wxpython/gmodeler/toolbars.py:57 msgid "Manually define relation between data and commands" @@ -4557,11 +4553,11 @@ msgstr "" #: ../gui/wxpython/gmodeler/toolbars.py:59 msgid "Add loop/series to model" -msgstr "" +msgstr "Aggiungi cicli/serie al modello" #: ../gui/wxpython/gmodeler/toolbars.py:60 msgid "Add comment to model" -msgstr "" +msgstr "Aggiungi commento al modello" #: ../gui/wxpython/gmodeler/toolbars.py:61 ../gui/wxpython/menustrings.py:201 #: ../gui/wxpython/menustrings.py:2094 @@ -4632,6 +4628,9 @@ msgid "" "\n" "Unable to add new action to the model." msgstr "" +"'%s' non è uno strumento di GRASS.\n" +"\n" +"Impossibile aggiungere una nuova azione al modello." #: ../gui/wxpython/gmodeler/dialogs.py:359 msgid "Relation properties" @@ -10997,7 +10996,7 @@ msgid "Abort running command" msgstr "Ferma l'esecuzione del comando" #: ../gui/wxpython/gui_core/goutput.py:374 -#, fuzzy, python-format +#, python-format msgid "%(txt)s (*.txt)|*.txt|%(files)s (*)|*" msgstr "%(txt)s (*.txt)|*.txt|%(files)s (*)|*" @@ -11217,7 +11216,7 @@ msgstr "" #: ../gui/wxpython/gui_core/preferences.py:392 msgid "At startup load maps from current mapset only (in the Data tab)" -msgstr "" +msgstr "All'avvio carica mappe solo dal mapset in uso (nel pannello Dati)" #: ../gui/wxpython/gui_core/preferences.py:407 msgid "Workspace settings" @@ -11771,9 +11770,8 @@ msgid "Map Display Settings" msgstr "Impostazioni della mappa" #: ../gui/wxpython/gui_core/toolbars.py:86 -#, fuzzy msgid "(Un)dock Map Display" -msgstr "Visualizzatore di mappe" +msgstr "" #: ../gui/wxpython/gui_core/wrap.py:258 msgid "Clear" @@ -11793,7 +11791,7 @@ msgstr "&Chiudi" #: ../gui/wxpython/gui_core/wrap.py:297 msgid "Apply" -msgstr "" +msgstr "Applica" #: ../gui/wxpython/gui_core/wrap.py:299 #: ../gui/wxpython/web_services/dialogs.py:692 @@ -12835,9 +12833,8 @@ msgid "Digitize new area (boundary without category) (Ctrl+A)" msgstr "Digitalizza nuova area (confine senza categoria)" #: ../gui/wxpython/vdigit/toolbars.py:179 -#, fuzzy msgid "Add new vertex to line or boundary (Ctrl+V)" -msgstr "Aggiungi nuovo vertice a linee o confini" +msgstr "Aggiungi nuovo vertice a linee o confini (Ctrl+V)" #: ../gui/wxpython/vdigit/toolbars.py:180 #: ../gui/wxpython/vdigit/toolbars.py:187 @@ -13622,6 +13619,9 @@ msgid "" "%s web service was not found in fetched capabilities file from <%s>:\n" "%s\n" msgstr "" +"%s il servizio web non è stato trovato nel file delle capabilities ottenuto " +"da <%s>:\n" +"%s\n" #: ../gui/wxpython/web_services/widgets.py:834 msgid "Source image format" @@ -17644,7 +17644,7 @@ msgstr "" #: ../gui/wxpython/lmgr/statusbar.py:137 msgid "Are you sure that you want to remove the MASK?" -msgstr "" +msgstr "Sei sicuro che vuoi rimuovere la MASK?" 
#: ../gui/wxpython/lmgr/statusbar.py:138 #, fuzzy @@ -17718,29 +17718,24 @@ msgid "Show attribute data for selected vector map" msgstr "Mostra i dati degli attributi per il vettoriale selezionato" #: ../gui/wxpython/lmgr/toolbars.py:128 -#, fuzzy msgid "Add multiple map layers" -msgstr "Aggiunge un layer raster" +msgstr "Aggiunge più layer raster" #: ../gui/wxpython/lmgr/toolbars.py:138 -#, fuzzy msgid "Add various raster maps" -msgstr "Aggiungi raster" +msgstr "Aggiungi più mappe raster" #: ../gui/wxpython/lmgr/toolbars.py:143 -#, fuzzy msgid "Add vector map" -msgstr "Aggiungi vettoriale" +msgstr "Aggiungi mappa vettoriale" #: ../gui/wxpython/lmgr/toolbars.py:148 -#, fuzzy msgid "Add various vector maps" -msgstr "Aggiungi diverse sovrapposizioni" +msgstr "Aggiungi diverse mappe vettoriali" #: ../gui/wxpython/lmgr/toolbars.py:158 -#, fuzzy msgid "Add web service map" -msgstr "Aggiungi layer da servizi web" +msgstr "Aggiungi mappa da servizi web" #: ../gui/wxpython/lmgr/toolbars.py:169 #, fuzzy @@ -17926,7 +17921,7 @@ msgstr "" #: ../gui/wxpython/dbmgr/manager.py:91 msgid "Attribute Table Manager" -msgstr "" +msgstr "Gestore della Tabella degli Attributi" #: ../gui/wxpython/dbmgr/manager.py:94 #, python-brace-format @@ -17984,7 +17979,7 @@ msgstr "Caricando gli attributi per il vettoriale <%s> ..." #: ../gui/wxpython/dbmgr/g.gui.dbmgr.py:55 msgid "Attribute Table Manager - GRASS GIS" -msgstr "" +msgstr "Gestore della Tabella degli Attributi - GRASS GIS" #: ../gui/wxpython/dbmgr/sqlbuilder.py:106 ../gui/wxpython/dbmgr/base.py:2351 msgid "Database connection" @@ -18856,7 +18851,7 @@ msgstr "%s=, %s=, %s= e %s= si escludono a vicenda." #: ../gui/wxpython/animation/g.gui.animation.py:137 msgid "Animation Tool - GRASS GIS" -msgstr "" +msgstr "Strumento Animazione - GRASS GIS" #: ../gui/wxpython/animation/dialogs.py:81 msgid "Adjust speed of animation" @@ -19319,7 +19314,7 @@ msgstr "Directory {t} non rimossa." #: ../gui/wxpython/animation/frame.py:49 msgid "Animation Tool" -msgstr "" +msgstr "Strumento Animazione" #: ../gui/wxpython/animation/frame.py:192 msgid "Animation Toolbar" diff --git a/pyproject.toml b/pyproject.toml index b59eed0043c..e06ff451bd0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,4 +38,4 @@ timeout = 300 [tool.bandit] exclude_dirs = ["./testsuite", "*/tests/*", "*/testsuite/*", "utils/test_generate_last_commit_file.py"] -skips = ["B324","B110", "B101", "B112", "B404"] +skips = ["B324","B110", "B101", "B112", "B311", "B404"] diff --git a/raster/r.surf.gauss/gaussurf.c b/raster/r.surf.gauss/gaussurf.c index 9c5adf12b68..66afbef6dd0 100644 --- a/raster/r.surf.gauss/gaussurf.c +++ b/raster/r.surf.gauss/gaussurf.c @@ -29,11 +29,6 @@ int gaussurf(char *out, /* Name of raster maps to be opened. 
*/ int row_count, col_count; - /****** INITIALISE RANDOM NUMBER GENERATOR ******/ - - /* You can set GRASS_RANDOM_SEED for repeatability */ - G_math_srand_auto(); - /****** OPEN CELL FILES AND GET CELL DETAILS ******/ fd_out = Rast_open_new(out, DCELL_TYPE); diff --git a/raster/r.surf.gauss/main.c b/raster/r.surf.gauss/main.c index b2d28e37967..91f261b1144 100644 --- a/raster/r.surf.gauss/main.c +++ b/raster/r.surf.gauss/main.c @@ -24,11 +24,13 @@ int main(int argc, char *argv[]) /****** INITIALISE ******/ double gauss_mean, gauss_sigma; + long seed_value; struct GModule *module; struct Option *out; struct Option *mean; struct Option *sigma; + struct Option *seed; G_gisinit(argv[0]); @@ -55,9 +57,31 @@ int main(int argc, char *argv[]) sigma->type = TYPE_DOUBLE; sigma->answer = "1.0"; + seed = G_define_option(); + seed->key = "seed"; + seed->type = TYPE_INTEGER; + seed->required = NO; + seed->label = _("Seed for random number generator"); + seed->description = _("The same seed can be used to obtain same results" + " or random seed can be generated by other means."); + if (G_parser(argc, argv)) exit(EXIT_FAILURE); + /****** INITIALISE RANDOM NUMBER GENERATOR ******/ + if (seed->answer) { + seed_value = atol(seed->answer); + G_srand48(seed_value); + G_verbose_message(_("Read random seed from %s option: %ld"), seed->key, + seed_value); + } + else { + /* default as it used to be */ + seed_value = G_math_srand_auto(); + G_verbose_message(_("Autogenerated random seed set to: %ld"), + seed_value); + } + sscanf(mean->answer, "%lf", &gauss_mean); sscanf(sigma->answer, "%lf", &gauss_sigma); diff --git a/raster/r.surf.gauss/testsuite/test_r_surf_gauss.py b/raster/r.surf.gauss/testsuite/test_r_surf_gauss.py new file mode 100644 index 00000000000..deb29e086e9 --- /dev/null +++ b/raster/r.surf.gauss/testsuite/test_r_surf_gauss.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 + +""" +MODULE: Test of r.surf.gauss + +AUTHOR(S): Corey White + +PURPOSE: Tests random gauss surface generation + +COPYRIGHT: (C) 2023 - 2024 by Corey White and the GRASS Development Team + +This program is free software under the GNU General Public +License (>=v2). Read the file COPYING that comes with GRASS +for details. 
+""" + +import os +from grass.gunittest.case import TestCase +from grass.gunittest.main import test + + +class MeanSigmaTestCase(TestCase): + """Test r.surf.gauss module""" + + # Raster map name be used as output + output = "random_result" + + @classmethod + def setUpClass(cls): + """Ensures expected computational region""" + os.environ["GRASS_RANDOM_SEED"] = "42" + # modifying region just for this script + cls.use_temp_region() + cls.runModule("g.region", rows=10, cols=10) + + @classmethod + def tearDownClass(cls): + """Remove the temporary region""" + cls.del_temp_region() + + def tearDown(self): + """Remove the output created from the module""" + self.runModule("g.remove", flags="f", type="raster", name=[self.output]) + + def test_defaut_settings(self): + """Check to see if univariate statistics match for default""" + self.assertModule("r.surf.gauss", output=self.output) + self.assertRasterFitsUnivar( + self.output, + reference=dict(mean=-0.044860, stddev=1.019485), + precision=1e-6, + ) + + def test_mean_sigma_params(self): + """Check if mean and sigma params are accepted""" + mean_value = 3.0 + sigma_value = 5.8 + self.assertModule( + "r.surf.gauss", + mean=mean_value, + sigma=sigma_value, + output=self.output, + ) + self.assertRasterExists(self.output, msg="Output was not created") + self.assertRasterFitsUnivar( + self.output, + reference=dict(mean=2.739812, stddev=5.913014), + precision=1e-6, + ) + + def test_random_seed_option(self): + """Checks if random seed option sets random number""" + mean_value = 3.0 + sigma_value = 5.8 + self.assertModule( + "r.surf.gauss", + mean=mean_value, + sigma=sigma_value, + output=self.output, + seed=22, + ) + self.assertRasterExists(self.output, msg="Output was not created") + self.assertRasterFitsUnivar( + self.output, + reference=dict(mean=3.183532, stddev=6.050756), + precision=1e-6, + ) + + +if __name__ == "__main__": + test() diff --git a/raster/r.what/Makefile b/raster/r.what/Makefile index 8cf21eb01bf..e30ba7d7003 100644 --- a/raster/r.what/Makefile +++ b/raster/r.what/Makefile @@ -2,7 +2,7 @@ MODULE_TOPDIR = ../.. 
PGM = r.what -LIBES = $(RASTERLIB) $(GISLIB) $(VECTORLIB) +LIBES = $(PARSONLIB) $(RASTERLIB) $(GISLIB) $(VECTORLIB) DEPENDENCIES = $(RASTERDEP) $(GISDEP) $(VECTORDEP) EXTRA_INC = $(VECT_INC) EXTRA_CFLAGS = $(VECT_CFLAGS) diff --git a/raster/r.what/main.c b/raster/r.what/main.c index 7bb50721e5d..0698b6ffbd6 100644 --- a/raster/r.what/main.c +++ b/raster/r.what/main.c @@ -29,6 +29,7 @@ #include #include #include +#include struct order { int point; @@ -49,6 +50,8 @@ static int by_point(const void *, const void *); static int tty = 0; +enum OutputFormat { PLAIN, JSON }; + int main(int argc, char *argv[]) { int i, j; @@ -74,7 +77,8 @@ int main(int argc, char *argv[]) char buffer[1024]; char **ptr; struct _opt { - struct Option *input, *cache, *null, *coords, *fs, *points, *output; + struct Option *input, *cache, *null, *coords, *fs, *points, *output, + *format; } opt; struct _flg { struct Flag *label, *cache, *cat_int, *color, *header, *cat; @@ -93,6 +97,12 @@ int main(int argc, char *argv[]) int red, green, blue; struct GModule *module; + JSON_Value *root_value = NULL, *point_value, *layer_value; + JSON_Array *root_array; + JSON_Object *point_object, *layer_object; + + enum OutputFormat format; + G_gisinit(argv[0]); /* Set description */ @@ -130,6 +140,17 @@ int main(int argc, char *argv[]) opt.fs = G_define_standard_option(G_OPT_F_SEP); opt.fs->guisection = _("Print"); + opt.format = G_define_option(); + opt.format->key = "format"; + opt.format->type = TYPE_STRING; + opt.format->required = NO; + opt.format->label = _("Output format"); + opt.format->options = "plain,json"; + opt.format->descriptions = "plain;Plain text output;" + "json;JSON (JavaScript Object Notation);"; + opt.format->answer = "plain"; + opt.format->guisection = _("Print"); + opt.cache = G_define_option(); opt.cache->key = "cache"; opt.cache->type = TYPE_INTEGER; @@ -255,8 +276,21 @@ int main(int argc, char *argv[]) Cats = Vect_new_cats_struct(); G_get_window(&window); + if (strcmp(opt.format->answer, "json") == 0) + format = JSON; + else + format = PLAIN; + + if (format == JSON) { + root_value = json_value_init_array(); + if (root_value == NULL) { + G_fatal_error(_("Failed to initialize JSON array. 
Out of memory?")); + } + root_array = json_array(root_value); + } + /* print header row */ - if (flg.header->answer) { + if (format == PLAIN && flg.header->answer) { if (flg.cat->answer) { fprintf(stdout, "cat%s", fs); } @@ -466,56 +500,112 @@ int main(int argc, char *argv[]) qsort(cache, point_cnt, sizeof(struct order), by_point); /* report data from re-ordered cache */ - for (point = 0; point < point_cnt; point++) { G_debug(1, "%s|%s at col %d, row %d\n", cache[point].east_buf, cache[point].north_buf, cache[point].col, cache[point].row); - if (flg.cat->answer) { - fprintf(stdout, "%d%s", cache[point].cat, fs); + if (format == PLAIN) { + + if (flg.cat->answer) { + fprintf(stdout, "%d%s", cache[point].cat, fs); + } + fprintf(stdout, "%s%s%s%s%s", cache[point].east_buf, fs, + cache[point].north_buf, fs, cache[point].lab_buf); + + for (i = 0; i < nfiles; i++) { + if (out_type[i] == CELL_TYPE) { + if (Rast_is_c_null_value(&cache[point].value[i])) { + fprintf(stdout, "%s%s", fs, null_str); + if (flg.label->answer) + fprintf(stdout, "%s", fs); + if (flg.color->answer) + fprintf(stdout, "%s", fs); + continue; + } + fprintf(stdout, "%s%ld", fs, + (long)cache[point].value[i]); + cache[point].dvalue[i] = cache[point].value[i]; + } + else { /* FCELL or DCELL */ + + if (Rast_is_d_null_value(&cache[point].dvalue[i])) { + fprintf(stdout, "%s%s", fs, null_str); + if (flg.label->answer) + fprintf(stdout, "%s", fs); + if (flg.color->answer) + fprintf(stdout, "%s", fs); + continue; + } + if (out_type[i] == FCELL_TYPE) + sprintf(tmp_buf, "%.7g", cache[point].dvalue[i]); + else /* DCELL */ + sprintf(tmp_buf, "%.15g", cache[point].dvalue[i]); + G_trim_decimal(tmp_buf); /* not needed with %g? */ + fprintf(stdout, "%s%s", fs, tmp_buf); + } + if (flg.label->answer) + fprintf(stdout, "%s%s", fs, + Rast_get_d_cat(&(cache[point].dvalue[i]), + &cats[i])); + if (flg.color->answer) + fprintf(stdout, "%s%s", fs, cache[point].clr_buf[i]); + } + fprintf(stdout, "\n"); } - fprintf(stdout, "%s%s%s%s%s", cache[point].east_buf, fs, - cache[point].north_buf, fs, cache[point].lab_buf); + else { + point_value = json_value_init_object(); + point_object = json_object(point_value); - for (i = 0; i < nfiles; i++) { - if (out_type[i] == CELL_TYPE) { - if (Rast_is_c_null_value(&cache[point].value[i])) { - fprintf(stdout, "%s%s", fs, null_str); + if (flg.cat->answer) { + json_object_set_number(point_object, "cat", + cache[point].cat); + } + + json_object_set_number(point_object, "easting", + atof(cache[point].east_buf)); + json_object_set_number(point_object, "northing", + atof(cache[point].north_buf)); + json_object_set_string(point_object, "site_name", + cache[point].lab_buf); + + for (i = 0; i < nfiles; i++) { + layer_value = json_value_init_object(); + layer_object = json_object(layer_value); + + if (Rast_is_c_null_value(&cache[point].value[i]) || + Rast_is_d_null_value(&cache[point].dvalue[i])) { + json_object_set_null(layer_object, "value"); if (flg.label->answer) - fprintf(stdout, "%s", fs); + json_object_set_null(layer_object, "label"); if (flg.color->answer) - fprintf(stdout, "%s", fs); - continue; + json_object_set_null(layer_object, "color"); } - fprintf(stdout, "%s%ld", fs, (long)cache[point].value[i]); - cache[point].dvalue[i] = cache[point].value[i]; - } - else { /* FCELL or DCELL */ - - if (Rast_is_d_null_value(&cache[point].dvalue[i])) { - fprintf(stdout, "%s%s", fs, null_str); + else { + if (out_type[i] == CELL_TYPE) { + json_object_set_number(layer_object, "value", + (long)cache[point].value[i]); + 
cache[point].dvalue[i] = cache[point].value[i]; + } + else { /* FCELL or DCELL */ + json_object_set_number(layer_object, "value", + cache[point].dvalue[i]); + } if (flg.label->answer) - fprintf(stdout, "%s", fs); + json_object_set_string( + layer_object, "label", + Rast_get_d_cat(&(cache[point].dvalue[i]), + &cats[i])); if (flg.color->answer) - fprintf(stdout, "%s", fs); - continue; + json_object_set_string(layer_object, "color", + cache[point].clr_buf[i]); } - if (out_type[i] == FCELL_TYPE) - sprintf(tmp_buf, "%.7g", cache[point].dvalue[i]); - else /* DCELL */ - sprintf(tmp_buf, "%.15g", cache[point].dvalue[i]); - G_trim_decimal(tmp_buf); /* not needed with %g? */ - fprintf(stdout, "%s%s", fs, tmp_buf); + + json_object_set_value(point_object, opt.input->answers[i], + layer_value); } - if (flg.label->answer) - fprintf( - stdout, "%s%s", fs, - Rast_get_d_cat(&(cache[point].dvalue[i]), &cats[i])); - if (flg.color->answer) - fprintf(stdout, "%s%s", fs, cache[point].clr_buf[i]); + json_array_append_value(root_array, point_value); } - fprintf(stdout, "\n"); } if (cache_report & !tty) @@ -527,6 +617,17 @@ int main(int argc, char *argv[]) cache_hit = cache_miss = 0; } + if (format == JSON) { + char *serialized_string = NULL; + serialized_string = json_serialize_to_string_pretty(root_value); + if (serialized_string == NULL) { + G_fatal_error(_("Failed to initialize pretty JSON string.")); + } + puts(serialized_string); + json_free_serialized_string(serialized_string); + json_value_free(root_value); + } + if (!opt.coords->answers && !opt.points->answers && tty) fprintf(stderr, "\n"); if (cache_report & !tty) diff --git a/raster/r.what/testsuite/test_r_what.py b/raster/r.what/testsuite/test_r_what.py index 12a8be2f90d..fcc312e6284 100644 --- a/raster/r.what/testsuite/test_r_what.py +++ b/raster/r.what/testsuite/test_r_what.py @@ -12,6 +12,7 @@ from grass.gunittest.case import TestCase from grass.gunittest.gmodules import SimpleModule import os +import json class TestRasterWhat(TestCase): @@ -437,6 +438,25 @@ class TestRasterWhat(TestCase): 332533.5941495|242831.139883875||121 """ + @staticmethod + def convert_plain_to_json(plain): + data = [] + lines = plain.split("\n") + for line in lines: + line = line.strip() + if line: + parts = line.split("|") + item = { + "easting": float(parts[0]), + "northing": float(parts[1]), + "site_name": parts[2], + "boundary_county_500m": {"value": int(parts[3])}, + } + if len(parts) == 5: + item["boundary_county_500m"]["color"] = parts[4] + data.append(item) + return data + @classmethod def setUpClass(cls): cls.use_temp_region() @@ -541,6 +561,32 @@ def test_raster_what_cache(self): msg="test_raster_what_cats did't run successfully", ) + def test_raster_what_json(self): + """Testing r.what runs successfully with input coordinates given as a vector points map and JSON output""" + reference = self.convert_plain_to_json(self.refrence_points) + module = SimpleModule( + "r.what", map=self.map1, points=self.points, format="json" + ) + module.run() + self.assertListEqual( + json.loads(str(module.outputs.stdout)), + reference, + "test_raster_what_points did't run successfully", + ) + + def test_raster_what_points_flag_r_json(self): + """Testing r.what runs successfully with flag r and json output""" + reference = self.convert_plain_to_json(self.refrence_flag_r) + module = SimpleModule( + "r.what", map=self.map1, points=self.points, flags="r", format="json" + ) + module.run() + self.assertListEqual( + json.loads(str(module.outputs.stdout)), + reference, + 
"test_raster_what_cats did't run successfully", + ) + if __name__ == "__main__": from grass.gunittest.main import test diff --git a/scripts/v.dissolve/v.dissolve.html b/scripts/v.dissolve/v.dissolve.html index cc69e21e1cc..54044a1bcf4 100644 --- a/scripts/v.dissolve/v.dissolve.html +++ b/scripts/v.dissolve/v.dissolve.html @@ -1,87 +1,87 @@

DESCRIPTION

-The v.dissolve module is used to merge adjacent or overlapping -features in a vector map that share the same category value. The -resulting merged feature(s) retain this category value. +The v.dissolve module is used to merge adjacent or overlapping +features in a vector map that share the same category value. The +resulting merged feature(s) retain this category value.

-Figure: Areas with the same attribute value (first image) are merged +Figure: Areas with the same attribute value (first image) are merged into one (second image).

-Instead of dissolving features based on the category values, the user -can define an integer or string column using the column -parameter. In that case, features that share the same value in that -column are dissolved. Note, the newly created layer does not retain the +Instead of dissolving features based on the category values, the user +can define an integer or string column using the column +parameter. In that case, features that share the same value in that +column are dissolved. Note, the newly created layer does not retain the category (cat) values from the input layer.

-Note that multiple areas with the same category or the same attribute -value that are not adjacent are merged into one entity, which consists +Note that multiple areas with the same category or the same attribute +value that are not adjacent are merged into one entity, which consists of multiple features, i.e., a multipart feature.

Attribute aggregation

-The attributes of merged areas can be aggregated using various 
-aggregation methods. The specific methods available depend on the 
-backend used for aggregation. Two aggregate backends (specified with 
-the aggregate_backend parameter) are available, univar 
-and sql. The backend is determined automatically based on the 
-requested methods. When the function is one of the SQL 
-build-in aggregate functions, the sql backend is used. 
-Otherwise, the univar backend is used. 
+The attributes of merged areas can be aggregated using various
+aggregation methods. The specific methods available depend on the
+backend used for aggregation. Two aggregate backends (specified with
+the aggregate_backend parameter) are available, univar
+and sql. The backend is determined automatically based on the
+requested methods. When the function is one of the SQL
+built-in aggregate functions, the sql backend is used.
+Otherwise, the univar backend is used.

-The default behavior is intended for interactive use and -testing. For scripting and other automated usage, explicitly specifying -the backend with the aggregate_backend parameter is strongly -recommended. When choosing, note that the sql aggregate -backend, regardless of the underlying database, will typically perform +The default behavior is intended for interactive use and +testing. For scripting and other automated usage, explicitly specifying +the backend with the aggregate_backend parameter is strongly +recommended. When choosing, note that the sql aggregate +backend, regardless of the underlying database, will typically perform significantly better than the univar backend.

Aggregation using univar backend

-When univar is used, the methods available are the ones which -v.db.univar uses by default, i.e., n, min, -max, range, mean, mean_abs, -variance, stddev, coef_var, and +When univar is used, the methods available are the ones which +v.db.univar uses by default, i.e., n, min, +max, range, mean, mean_abs, +variance, stddev, coef_var, and sum.

Aggregation using sql backend

-When the sql backend is used, the methods depend on the SQL -database backend used for the attribute table of the input vector. For -SQLite, there are at least the following built-in aggregate -functions: count, min, max, +When the sql backend is used, the methods depend on the SQL +database backend used for the attribute table of the input vector. For +SQLite, there are at least the following built-in aggregate +functions: count, min, max, avg, sum, and total. -For PostgreSQL, the list of aggregate -functions is much longer and includes, e.g., count, -min, max, avg, sum, -stddev, and variance. +For PostgreSQL, the list of aggregate +functions is much longer and includes, e.g., count, +min, max, avg, sum, +stddev, and variance.

Defining the aggregation method

-If only the parameter aggregate_columns is provided, all the -following aggregation statistics are calculated: n, -min, max, mean, and sum. If the -univar backend is specified, all the available methods for the +If only the parameter aggregate_columns is provided, all the +following aggregation statistics are calculated: n, +min, max, mean, and sum. If the +univar backend is specified, all the available methods for the univar backend are used.

-The aggregate_methods parameter can be used to specify which -aggregation statistics should be computed. Alternatively, the parameter -aggregate_columns can be used to specify the method using SQL -syntax. This provides the highest flexibility, and it is suitable for -scripting. The SQL statement should specify both the column and the +The aggregate_methods parameter can be used to specify which +aggregation statistics should be computed. Alternatively, the parameter +aggregate_columns can be used to specify the method using SQL +syntax. This provides the highest flexibility, and it is suitable for +scripting. The SQL statement should specify both the column and the functions applied, e.g.,

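
For example, a hedged sketch of such a call through the GRASS Python scripting API (the map and column names are illustrative, borrowed from the Examples section further below; the explicit result type follows the SQL-syntax requirement described later on this page):

    import grass.script as gs

    # SQL syntax in aggregate_columns names both the column and the function.
    gs.run_command(
        "v.dissolve",
        input="boundary_municp",
        column="DOTURBAN_N",
        output="municipalities_sum",  # illustrative output name
        aggregate_columns="sum(ACRES)",
        result_columns="acres double precision",  # type is mandatory with SQL syntax
    )
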
@@ -89,24 +89,24 @@ 

Defining the aggregation method

-Note that when the aggregate_columns parameter is used, the -sql backend should be used. In addition, the -aggregate_columns and aggregate_methods cannot be used +Note that when the aggregate_columns parameter is used, the +sql backend should be used. In addition, the +aggregate_columns and aggregate_methods cannot be used together.

-For convenience, certain methods, namely n, count, -mean, and avg, are automatically converted to the -appropriate name for the selected backend. However, for scripting, it -is recommended to specify the appropriate method (function) name for -the backend, as the conversion is a heuristic that may change in the +For convenience, certain methods, namely n, count, +mean, and avg, are automatically converted to the +appropriate name for the selected backend. However, for scripting, it +is recommended to specify the appropriate method (function) name for +the backend, as the conversion is a heuristic that may change in the future.

-If the result_columns is not provided, each method is applied to +If the result_columns is not provided, each method is applied to each column specified by aggregate_columns. This results in a -column for each of the combinations. These result columns have -auto-generated names based on the aggregate column and method. For +column for each of the combinations. These result columns have +auto-generated names based on the aggregate column and method. For example, setting the following parameters:

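
A hedged sketch of such a call (the columns A and B and the methods sum and n are inferred from the auto-generated result column names quoted below; the other names are illustrative):

    import grass.script as gs

    # Every method is applied to every aggregate column:
    # 2 columns x 2 methods -> A_sum, A_n, B_sum, B_n.
    gs.run_command(
        "v.dissolve",
        input="input_map",          # illustrative
        column="attribute_column",  # illustrative
        output="dissolved_map",
        aggregate_columns="A,B",
        aggregate_methods="sum,n",
    )
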
@@ -115,13 +115,13 @@ 

Defining the aggregation method

-results in the following columns: A_sum, A_n, B_sum, B_n. See +results in the following columns: A_sum, A_n, B_sum, B_n. See the Examples section.

-If the result_column is provided, each method is applied only 
-once to the matching column in the aggregate column list, and the 
-result will be available under the name of the matching result column. 
+If the result_columns is provided, each method is applied only
+once to the matching column in the aggregate column list, and the
+result will be available under the name of the matching result column.
 For example, setting the following parameter:

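
A hedged sketch of such a call (the methods and result column names are inferred from the description below; map names are illustrative):

    import grass.script as gs

    # One method per column, one result column per pair:
    # sum(A) -> sum_a, max(B) -> n_b.
    gs.run_command(
        "v.dissolve",
        input="input_map",          # illustrative
        column="attribute_column",  # illustrative
        output="dissolved_map",
        aggregate_columns="A,B",
        aggregate_methods="sum,max",
        result_columns="sum_a,n_b",
    )
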
@@ -131,45 +131,45 @@ 

Defining the aggregation method

-results in the column sum_a with the sum of the values of 
-A and the column n_b with the max of B. Note that 
-the number of items in aggregate_columns, 
-aggregate_methods (unless omitted), and result_column 
-needs to match, and no combinations are created on the fly. See 
+results in the column sum_a with the sum of the values of
+A and the column n_b with the max of B. Note that
+the number of items in aggregate_columns,
+aggregate_methods (unless omitted), and result_columns
+needs to match, and no combinations are created on the fly. See
 the Examples section.

-For scripting, it is recommended to specify all resulting column names, -while for interactive use, automatically created combinations are +For scripting, it is recommended to specify all resulting column names, +while for interactive use, automatically created combinations are expected to be beneficial, especially for exploratory analysis.

-The type of the result column is determined based on the method -selected. For n and count, the type is INTEGER and -for all other methods, it is DOUBLE. Aggregate methods that produce -other types require the type to be specified as part of the -result_columns. A type can be provided in result_columns -using the SQL syntax name type, e.g., sum_of_values -double precision. Type specification is mandatory when SQL -syntax is used in aggregate_columns (and +The type of the result column is determined based on the method +selected. For n and count, the type is INTEGER and +for all other methods, it is DOUBLE. Aggregate methods that produce +other types require the type to be specified as part of the +result_columns. A type can be provided in result_columns +using the SQL syntax name type, e.g., sum_of_values +double precision. Type specification is mandatory when SQL +syntax is used in aggregate_columns (and aggregate_methods is omitted).

NOTES

-GRASS defines a vector area as a composite entity consisting of a set 
-of closed boundaries and a centroid. The centroids must contain a 
-category number (see v.centroids), this number is linked to 
-area attributes and database links. 
+GRASS defines a vector area as a composite entity consisting of a set
+of closed boundaries and a centroid. The centroids must contain a
+category number (see v.centroids); this number is linked to
+area attributes and database links.

-Multiple attributes may be linked to a single vector entity through -numbered fields referred to as layers. Refer to v.category for +Multiple attributes may be linked to a single vector entity through +numbered fields referred to as layers. Refer to v.category for more details.

-Merging of areas can also be accomplished using v.extract -d -which provides some additional options. In fact, v.dissolve is -simply a front-end to that module. The use of the column +Merging of areas can also be accomplished using v.extract -d +which provides some additional options. In fact, v.dissolve is +simply a front-end to that module. The use of the column parameter adds a call to v.reclass before. @@ -262,27 +262,27 @@

Aggregating multiple attributes

-By default, all methods specified in the aggregate_methods are 
-applied to all columns, so result of the above is four columns. While 
-this is convenient for getting multiple statistics for similar columns 
-(e.g. averages and standard deviations of multiple population 
-statistics columns), in our case, each column is different and each 
+By default, all methods specified in the aggregate_methods are
+applied to all columns, so the result of the above is four columns. While
+this is convenient for getting multiple statistics for similar columns
+(e.g. averages and standard deviations of multiple population
+statistics columns), in our case, each column is different and each
 aggregate method should be applied only to its corresponding column.

-The v.dissolve module will apply each aggregate method only to -the corresponding column when column names for the results are +The v.dissolve module will apply each aggregate method only to +the corresponding column when column names for the results are specified manually with the result_columns option:

 v.dissolve input=boundary_municp column=DOTURBAN_N output=municipalities_4 \
-	aggregate_columns=ACRES,NEW_PERC_G aggregate_methods=sum,avg \ 
+	aggregate_columns=ACRES,NEW_PERC_G aggregate_methods=sum,avg \
 	result_columns=acres,new_perc_g
 

-Now we have full control over what columns are created, but we also -need to specify an aggregate method for each column even when the +Now we have full control over what columns are created, but we also +need to specify an aggregate method for each column even when the aggregate methods are the same:

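
A hedged sketch of such a call, a hypothetical variant of the previous example with the same method repeated once per aggregate column (the method and result names are illustrative):

    import grass.script as gs

    # The method must be listed once for each aggregate column,
    # even when it is the same method.
    gs.run_command(
        "v.dissolve",
        input="boundary_municp",
        column="DOTURBAN_N",
        output="municipalities_5",  # illustrative output name
        aggregate_columns="ACRES,NEW_PERC_G",
        aggregate_methods="avg,avg",
        result_columns="acres,new_perc_g",
    )
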
@@ -292,29 +292,29 @@ 

Aggregating multiple attributes

-While it is often not necessary to specify aggregate methods or names -for interactive exploratory analysis, specifying both -aggregate_methods and result_columns manually is a best -practice for scripting (unless SQL syntax is used for +While it is often not necessary to specify aggregate methods or names +for interactive exploratory analysis, specifying both +aggregate_methods and result_columns manually is a best +practice for scripting (unless SQL syntax is used for aggregate_columns, see below).

Aggregating using SQL syntax

-The aggregation can be done also using the full SQL syntax and set of 
+The aggregation can also be done using the full SQL syntax and set of
 aggregate functions available for a given attribute database backend.
 Here, we will assume the default SQLite database backend for attribute.

-Modifying the previous example, we will now specify the SQL aggregate 
-function calls explicitly instead of letting v.dissolve 
-generate them for us. We will compute sum of the ACRES column using 
-sum(ACRES) (alternatively, we could use SQLite specific 
-total(ACRES) which returns zero even when all values are 
-NULL). Further, we will count number of aggregated (i.e., dissolved) 
-parts using count(*) which counts all rows regardless of 
-NULL values. Then, we will count all unique names of parts as 
-distinguished by the MB_NAME column using count(distinct 
-MB_NAME). Finally, we will collect all these names into a 
+Modifying the previous example, we will now specify the SQL aggregate
+function calls explicitly instead of letting v.dissolve
+generate them for us. We will compute the sum of the ACRES column using
+sum(ACRES) (alternatively, we could use the SQLite-specific
+total(ACRES) which returns zero even when all values are
+NULL). Further, we will count the number of aggregated (i.e., dissolved)
+parts using count(*) which counts all rows regardless of
+NULL values. Then, we will count all unique names of parts as
+distinguished by the MB_NAME column using count(distinct
+MB_NAME). Finally, we will collect all these names into a
 comma-separated list using group_concat(MB_NAME):

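
A hedged sketch of the corresponding call (the aggregate expressions are the ones described above; the output and result column names are illustrative, and each result column carries an explicit type as required with SQL syntax):

    import grass.script as gs

    gs.run_command(
        "v.dissolve",
        input="boundary_municp",
        column="DOTURBAN_N",
        output="municipalities_stats",  # illustrative output name
        aggregate_columns=(
            "sum(ACRES),count(*),count(distinct MB_NAME),group_concat(MB_NAME)"
        ),
        result_columns=(
            "acres double precision,parts integer,"
            "unique_names integer,names text"
        ),
    )
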
@@ -324,15 +324,15 @@ 

Aggregating using SQL syntax

-Here, v.dissolve doesn't make any assumptions about the 
-resulting column types, so we specified both named and the type of each 
+Here, v.dissolve doesn't make any assumptions about the
+resulting column types, so we specified both the name and the type of each
 column.

-When working with general SQL syntax, v.dissolve turns off its 
-checks for number of aggregate and result columns to allow for all SQL 
-syntax to be used for aggregate columns. This allows us to use also 
-functions with multiple parameters, for example specify separator to be 
+When working with general SQL syntax, v.dissolve turns off its
+checks for the number of aggregate and result columns to allow for all SQL
+syntax to be used for aggregate columns. This also allows us to use
+functions with multiple parameters, for example to specify the separator to be
 used with group_concat:

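
A hedged sketch of such a call (the two-argument group_concat form with an explicit separator is SQLite's; output and result names are illustrative):

    import grass.script as gs

    # group_concat with an explicit separator passed as its second argument.
    gs.run_command(
        "v.dissolve",
        input="boundary_municp",
        column="DOTURBAN_N",
        output="municipalities_concat",  # illustrative output name
        aggregate_columns="group_concat(MB_NAME, ';')",
        result_columns="names text",
    )
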
@@ -342,7 +342,7 @@ 

Aggregating using SQL syntax

-To inspect the result, we will use v.db.select retrieving only +To inspect the result, we will use v.db.select retrieving only one row for DOTURBAN_N == 'Wadesboro':