diff --git a/.devcontainer/README.MD b/.devcontainer/README.MD new file mode 100644 index 000000000000..d9fe31f38f3a --- /dev/null +++ b/.devcontainer/README.MD @@ -0,0 +1 @@ +The files in this directory configure a development container for GitHub Codespaces. \ No newline at end of file diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 000000000000..a107c7891727 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,15 @@ +{ + "name": "OpenDevin Codespaces", + "image": "mcr.microsoft.com/devcontainers/universal", + "customizations":{ + "vscode":{ + "extensions": [ + "ms-python.python" + ] + } + }, + "onCreateCommand": "sh ./.devcontainer/on_create.sh", + "postCreateCommand": "make build", + "postStartCommand": "nohup bash -c 'make run > output.log 2>&1 &'" + +} diff --git a/.devcontainer/on_create.sh b/.devcontainer/on_create.sh new file mode 100644 index 000000000000..4b8d592ae51a --- /dev/null +++ b/.devcontainer/on_create.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +sudo apt update +sudo apt install -y netcat +sudo add-apt-repository -y ppa:deadsnakes/ppa +sudo apt install -y python3.11 +curl -sSL https://install.python-poetry.org | python3.11 - +# chromadb requires SQLite > 3.35 but SQLite in Python3.11.9 comes with 3.31.1 +sudo cp /opt/conda/lib/libsqlite3.so.0 /lib/x86_64-linux-gnu/libsqlite3.so.0 +cat << EOF > config.toml +[core] +workspace_base = "./workspace" +debug = 1 + +[sandbox] +use_host_network = 1 +persist_sandbox = 1 +fast_boot = 1 +user_id = 1001 +EOF diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index b15e920a2829..bc649f426695 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,8 +1,11 @@ **What is the problem that this fixes or functionality that this introduces? 
Does it fix any open issues?** ---- + +--- **Give a summary of what the PR does, explaining any non-trivial design decisions** + + --- **Other references** diff --git a/.github/workflows/clean-up.yml b/.github/workflows/clean-up.yml new file mode 100644 index 000000000000..a00dad7282db --- /dev/null +++ b/.github/workflows/clean-up.yml @@ -0,0 +1,68 @@ +# Workflow that cleans up outdated and old workflows to prevent out of disk issues +name: Delete old workflow runs + +on: + workflow_dispatch: + inputs: + days: + description: 'Days-worth of runs to keep for each workflow' + required: true + default: '30' + minimum_runs: + description: 'Minimum runs to keep for each workflow' + required: true + default: '10' + delete_workflow_pattern: + description: 'Name or filename of the workflow (if not set, all workflows are targeted)' + required: false + delete_workflow_by_state_pattern: + description: 'Filter workflows by state: active, deleted, disabled_fork, disabled_inactivity, disabled_manually' + required: true + default: "ALL" + type: choice + options: + - "ALL" + - active + - deleted + - disabled_inactivity + - disabled_manually + delete_run_by_conclusion_pattern: + description: 'Remove runs based on conclusion: action_required, cancelled, failure, skipped, success' + required: true + default: 'ALL' + type: choice + options: + - 'ALL' + - 'Unsuccessful: action_required,cancelled,failure,skipped' + - action_required + - cancelled + - failure + - skipped + - success + dry_run: + description: 'Logs simulated changes, no deletions are performed' + required: false + +jobs: + del_runs: + runs-on: ubuntu-latest + permissions: + actions: write + contents: read + steps: + - name: Delete workflow runs + uses: Mattraks/delete-workflow-runs@v2 + with: + token: ${{ github.token }} + repository: ${{ github.repository }} + retain_days: ${{ github.event.inputs.days }} + keep_minimum_runs: ${{ github.event.inputs.minimum_runs }} + delete_workflow_pattern: ${{ 
github.event.inputs.delete_workflow_pattern }} + delete_workflow_by_state_pattern: ${{ github.event.inputs.delete_workflow_by_state_pattern }} + delete_run_by_conclusion_pattern: >- + ${{ + startsWith(github.event.inputs.delete_run_by_conclusion_pattern, 'Unsuccessful:') + && 'action_required,cancelled,failure,skipped' + || github.event.inputs.delete_run_by_conclusion_pattern + }} + dry_run: ${{ github.event.inputs.dry_run }} diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml index 939e72bcb189..b4ed558a0c1d 100644 --- a/.github/workflows/deploy-docs.yml +++ b/.github/workflows/deploy-docs.yml @@ -1,3 +1,4 @@ +# Workflow that builds and deploys the documentation website name: Deploy Docs to GitHub Pages on: @@ -5,10 +6,13 @@ on: branches: - main pull_request: + paths: + - 'docs/**' branches: - main jobs: + # Build the documentation website build: name: Build Docusaurus runs-on: ubuntu-latest @@ -25,23 +29,23 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.11" - + python-version: '3.11' - name: Generate Python Docs run: rm -rf docs/modules/python && pip install pydoc-markdown && pydoc-markdown - name: Install dependencies run: cd docs && npm ci - name: Build website run: cd docs && npm run build - - name: Upload Build Artifact if: github.ref == 'refs/heads/main' uses: actions/upload-pages-artifact@v3 with: path: docs/build + # Deploy the documentation website deploy: name: Deploy to GitHub Pages + runs-on: ubuntu-latest needs: build if: github.ref == 'refs/heads/main' && github.repository == 'OpenDevin/OpenDevin' # Grant GITHUB_TOKEN the permissions required to make a Pages deployment @@ -52,7 +56,6 @@ jobs: environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} - runs-on: ubuntu-latest steps: - name: Deploy to GitHub Pages id: deployment diff --git a/.github/workflows/dummy-agent-test.yml b/.github/workflows/dummy-agent-test.yml index 8422f0c361ef..6cf4d5900c66 100644 --- 
a/.github/workflows/dummy-agent-test.yml +++ b/.github/workflows/dummy-agent-test.yml @@ -1,3 +1,4 @@ +# Workflow that uses the DummyAgent to run a simple task name: Run E2E test with dummy agent concurrency: @@ -10,9 +11,6 @@ on: - main pull_request: -env: - PERSIST_SANDBOX : "false" - jobs: test: runs-on: ubuntu-latest @@ -25,7 +23,7 @@ jobs: - name: Set up environment run: | curl -sSL https://install.python-poetry.org | python3 - - poetry install --without evaluation + poetry install --without evaluation,llama-index poetry run playwright install --with-deps chromium wget https://huggingface.co/BAAI/bge-small-en-v1.5/raw/main/1_Pooling/config.json -P /tmp/llama_index/models--BAAI--bge-small-en-v1.5/snapshots/5c38ec7c405ec4b44b94cc5a9bb96e735b38267a/1_Pooling/ - name: Run tests diff --git a/.github/workflows/ghcr-runtime.yml b/.github/workflows/ghcr-runtime.yml deleted file mode 100644 index cecb1807c04d..000000000000 --- a/.github/workflows/ghcr-runtime.yml +++ /dev/null @@ -1,265 +0,0 @@ -name: Build Publish and Test Runtime Image - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} - -on: - push: - branches: - - main - tags: - - '*' - pull_request: - workflow_dispatch: - inputs: - reason: - description: 'Reason for manual trigger' - required: true - default: '' - -jobs: - ghcr_build_runtime: - runs-on: ubuntu-latest - - outputs: - tags: ${{ steps.capture-tags.outputs.tags }} - - permissions: - contents: read - packages: write - - strategy: - matrix: - image: ["od_runtime"] - base_image: ["ubuntu:22.04"] - platform: ["amd64", "arm64"] - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main - with: - # this might remove tools that are actually needed, - # if set to "true" but frees about 6 GB - tool-cache: true - # all of these default to true, but feel free to set to - # "false" if necessary for your workflow - 
android: true - dotnet: true - haskell: true - large-packages: true - docker-images: false - swap-storage: true - - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v3 - - - name: Install poetry via pipx - run: pipx install poetry - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.11" - cache: "poetry" - - - name: Install Python dependencies using Poetry - run: make install-python-dependencies - - - name: Create source distribution and Dockerfile - run: poetry run python3 opendevin/runtime/utils/runtime_build.py --base_image ${{ matrix.base_image }} --build_folder containers/runtime - - - name: Build and export image - id: build - run: ./containers/build.sh ${{ matrix.image }} ${{ github.repository_owner }} ${{ matrix.platform }} - - - name: Capture tags - id: capture-tags - run: | - tags=$(cat tags.txt) - echo "tags=$tags" - echo "tags=$tags" >> $GITHUB_OUTPUT - - - name: Upload Docker image as artifact - uses: actions/upload-artifact@v4 - with: - name: ${{ matrix.image }}-docker-image-${{ matrix.platform }} - path: /tmp/${{ matrix.image }}_image_${{ matrix.platform }}.tar - - test-for-runtime: - name: Test for Runtime - runs-on: ubuntu-latest - needs: ghcr_build_runtime - env: - PERSIST_SANDBOX: "false" - steps: - - uses: actions/checkout@v4 - - - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main - with: - # this might remove tools that are actually needed, - # when set to "true" but frees about 6 GB - tool-cache: true - - # all of these default to true, but feel free to set to - # "false" if necessary for your workflow - android: true - dotnet: true - haskell: true - large-packages: true - swap-storage: true - - - name: Install poetry via pipx - run: pipx install poetry - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.11" - cache: "poetry" - - - name: Install Python dependencies 
using Poetry - run: make install-python-dependencies - - - name: Download Runtime Docker image - uses: actions/download-artifact@v4 - with: - name: od_runtime-docker-image-amd64 - path: /tmp/ - - - name: Load Runtime image and run runtime tests - run: | - # Load the Docker image and capture the output - output=$(docker load -i /tmp/od_runtime_image_amd64.tar) - - # Extract the first image name from the output - image_name=$(echo "$output" | grep -oP 'Loaded image: \K.*' | head -n 1) - - # Print the full name of the image - echo "Loaded Docker image: $image_name" - - SANDBOX_CONTAINER_IMAGE=$image_name TEST_IN_CI=true poetry run pytest --cov=agenthub --cov=opendevin --cov-report=xml -s ./tests/unit/test_runtime.py - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v4 - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - - ghcr_push: - runs-on: ubuntu-latest - # don't push if runtime tests fail - needs: [ghcr_build_runtime, test-for-runtime] - if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') - - env: - tags: ${{ needs.ghcr_build_runtime.outputs.tags }} - - permissions: - contents: read - packages: write - - strategy: - matrix: - image: ["od_runtime"] - platform: ["amd64", "arm64"] - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Free Disk Space (Ubuntu) - uses: jlumbroso/free-disk-space@main - with: - tool-cache: true - android: true - dotnet: true - haskell: true - large-packages: true - docker-images: false - swap-storage: true - - - name: Login to GHCR - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Download Docker images - uses: actions/download-artifact@v4 - with: - name: ${{ matrix.image }}-docker-image-${{ matrix.platform }} - path: /tmp/${{ matrix.platform }} - - - name: List downloaded files - run: | - ls -la /tmp/${{ matrix.platform }} - file /tmp/${{ matrix.platform }}/* - - - 
name: Load images and push to registry - run: | - mv /tmp/${{ matrix.platform }}/${{ matrix.image }}_image_${{ matrix.platform }}.tar ./${{ matrix.image }}_image_${{ matrix.platform }}.tar - if ! loaded_image=$(docker load -i ${{ matrix.image }}_image_${{ matrix.platform }}.tar | grep "Loaded image:" | head -n 1 | awk '{print $3}'); then - echo "Failed to load Docker image" - exit 1 - fi - echo "loaded image = $loaded_image" - tags=$(echo ${tags} | tr ' ' '\n') - image_name=$(echo "ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}" | tr '[:upper:]' '[:lower:]') - echo "image name = $image_name" - for tag in $tags; do - echo "tag = $tag" - if [ -n "$image_name" ]; then - docker tag $loaded_image $image_name:${tag}_${{ matrix.platform }} - docker push $image_name:${tag}_${{ matrix.platform }} - else - echo "Skipping tag and push due to empty image_name" - fi - done - - create_manifest: - runs-on: ubuntu-latest - needs: [ghcr_build_runtime, ghcr_push] - if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') - - env: - tags: ${{ needs.ghcr_build_runtime.outputs.tags }} - - strategy: - matrix: - image: ["od_runtime"] - - permissions: - contents: read - packages: write - - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Login to GHCR - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Create and push multi-platform manifest - run: | - image_name=$(echo "ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}" | tr '[:upper:]' '[:lower:]') - echo "image name = $image_name" - tags=$(echo ${tags} | tr ' ' '\n') - for tag in $tags; do - echo 'tag = $tag' - docker buildx imagetools create --tag $image_name:$tag \ - $image_name:${tag}_amd64 \ - $image_name:${tag}_arm64 - done diff --git a/.github/workflows/ghcr.yml b/.github/workflows/ghcr.yml index 43427b641b11..66270d3a0e98 100644 --- a/.github/workflows/ghcr.yml +++ 
b/.github/workflows/ghcr.yml @@ -1,4 +1,5 @@ -name: Build Publish and Test Docker Image +# Workflow that builds, tests and then pushes the docker images to the ghcr.io repository +name: Build Publish and Test Runtime Image concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -19,25 +20,21 @@ on: default: '' jobs: + # Builds the OpenDevin Docker images ghcr_build: runs-on: ubuntu-latest - outputs: tags: ${{ steps.capture-tags.outputs.tags }} - permissions: contents: read packages: write - strategy: matrix: - image: ["sandbox", "opendevin"] - platform: ["amd64", "arm64"] - + image: ['opendevin'] + platform: ['amd64', 'arm64'] steps: - name: Checkout uses: actions/checkout@v4 - - name: Free Disk Space (Ubuntu) uses: jlumbroso/free-disk-space@main with: @@ -52,62 +49,152 @@ jobs: large-packages: true docker-images: false swap-storage: true - - name: Set up QEMU uses: docker/setup-qemu-action@v3 - - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v3 - - name: Build and export image id: build run: ./containers/build.sh ${{ matrix.image }} ${{ github.repository_owner }} ${{ matrix.platform }} - - name: Capture tags id: capture-tags run: | tags=$(cat tags.txt) echo "tags=$tags" echo "tags=$tags" >> $GITHUB_OUTPUT + - name: Upload Docker image as artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.image }}-docker-image-${{ matrix.platform }} + path: /tmp/${{ matrix.image }}_image_${{ matrix.platform }}.tar + retention-days: 14 + # Builds the runtime Docker images + ghcr_build_runtime: + runs-on: ubuntu-latest + outputs: + tags: ${{ steps.capture-tags.outputs.tags }} + permissions: + contents: read + packages: write + strategy: + matrix: + image: ['od_runtime'] + base_image: ['nikolaik/python-nodejs:python3.11-nodejs22'] + platform: ['amd64', 'arm64'] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + # this might remove tools that are 
actually needed, + # if set to "true" but frees about 6 GB + tool-cache: true + # all of these default to true, but feel free to set to + # "false" if necessary for your workflow + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: false + swap-storage: true + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@v3 + - name: Install poetry via pipx + run: pipx install poetry + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'poetry' + - name: Install Python dependencies using Poetry + run: make install-python-dependencies + - name: Create source distribution and Dockerfile + run: poetry run python3 opendevin/runtime/utils/runtime_build.py --base_image ${{ matrix.base_image }} --build_folder containers/runtime --force_rebuild + - name: Build and export image + id: build + run: | + if [ -f 'containers/runtime/Dockerfile' ]; then + echo 'Dockerfile detected, building runtime image...' + ./containers/build.sh ${{ matrix.image }} ${{ github.repository_owner }} ${{ matrix.platform }} + else + echo 'No Dockerfile detected which means an exact image is already built. Pulling the image and saving it to a tar file...' 
+ source containers/runtime/config.sh + echo "$DOCKER_IMAGE_TAG $DOCKER_IMAGE_HASH_TAG" >> tags.txt + echo "Pulling image $DOCKER_IMAGE/$DOCKER_IMAGE_HASH_TAG to /tmp/${{ matrix.image }}_image_${{ matrix.platform }}.tar" + docker pull $DOCKER_IMAGE:$DOCKER_IMAGE_HASH_TAG + docker save $DOCKER_IMAGE:$DOCKER_IMAGE_HASH_TAG -o /tmp/${{ matrix.image }}_image_${{ matrix.platform }}.tar + fi + - name: Capture tags + id: capture-tags + run: | + tags=$(cat tags.txt) + echo "tags=$tags" + echo "tags=$tags" >> $GITHUB_OUTPUT - name: Upload Docker image as artifact uses: actions/upload-artifact@v4 with: name: ${{ matrix.image }}-docker-image-${{ matrix.platform }} path: /tmp/${{ matrix.image }}_image_${{ matrix.platform }}.tar + retention-days: 14 - test-for-sandbox: - name: Test for Sandbox + # Run unit tests with the EventStream and Server runtime Docker images + test_runtime: + name: Test Runtime runs-on: ubuntu-latest - needs: ghcr_build - env: - PERSIST_SANDBOX: "false" + needs: [ghcr_build_runtime, ghcr_build] + strategy: + matrix: + runtime_type: ['eventstream'] steps: - uses: actions/checkout@v4 - + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + # this might remove tools that are actually needed, + # when set to "true" but frees about 6 GB + tool-cache: true + # all of these default to true, but feel free to set to + # "false" if necessary for your workflow + android: true + dotnet: true + haskell: true + large-packages: true + swap-storage: true - name: Install poetry via pipx run: pipx install poetry - - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.11" - cache: "poetry" - + python-version: '3.11' + cache: 'poetry' - name: Install Python dependencies using Poetry run: make install-python-dependencies - - - name: Download sandbox Docker image + - name: Download Runtime Docker image + if: matrix.runtime_type == 'eventstream' + uses: actions/download-artifact@v4 + with: + name: 
od_runtime-docker-image-amd64 + path: /tmp/ + - name: Download Sandbox Docker image + if: matrix.runtime_type == 'server' uses: actions/download-artifact@v4 with: name: sandbox-docker-image-amd64 path: /tmp/ - - - name: Load sandbox image and run sandbox tests + - name: Load Runtime image and run runtime tests run: | # Load the Docker image and capture the output - output=$(docker load -i /tmp/sandbox_image_amd64.tar) + if [ "${{ matrix.runtime_type }}" == "eventstream" ]; then + output=$(docker load -i /tmp/od_runtime_image_amd64.tar) + else + output=$(docker load -i /tmp/sandbox_image_amd64.tar) + fi # Extract the first image name from the output image_name=$(echo "$output" | grep -oP 'Loaded image: \K.*' | head -n 1) @@ -115,51 +202,48 @@ jobs: # Print the full name of the image echo "Loaded Docker image: $image_name" - SANDBOX_CONTAINER_IMAGE=$image_name TEST_IN_CI=true poetry run pytest --cov=agenthub --cov=opendevin --cov-report=xml -s ./tests/unit/test_sandbox.py - + TEST_RUNTIME=${{ matrix.runtime_type }} SANDBOX_USER_ID=$(id -u) SANDBOX_CONTAINER_IMAGE=$image_name TEST_IN_CI=true poetry run pytest --cov=agenthub --cov=opendevin --cov-report=xml -s ./tests/unit/test_runtime.py - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - integration-tests-on-linux: - name: Integration Tests on Linux + # Run integration tests with the eventstream runtime Docker image + runtime_integration_tests_on_linux: + name: Runtime Integration Tests on Linux runs-on: ubuntu-latest - needs: ghcr_build - env: - PERSIST_SANDBOX: "false" + needs: [ghcr_build_runtime] strategy: fail-fast: false matrix: - python-version: ["3.11"] - sandbox: ["ssh", "local"] + python-version: ['3.11'] + # server is tested in a separate workflow + runtime_type: ['eventstream'] steps: - uses: actions/checkout@v4 - - name: Install poetry via pipx run: pipx install poetry - - name: Set up Python uses: actions/setup-python@v5 with: 
python-version: ${{ matrix.python-version }} cache: 'poetry' - - name: Install Python dependencies using Poetry run: make install-python-dependencies - - - name: Download sandbox Docker image + - name: Download Runtime Docker image uses: actions/download-artifact@v4 with: - name: sandbox-docker-image-amd64 + name: od_runtime-docker-image-amd64 path: /tmp/ - - - name: Load sandbox image and run integration tests - env: - SANDBOX_BOX_TYPE: ${{ matrix.sandbox }} + - name: Load runtime image and run integration tests run: | # Load the Docker image and capture the output - output=$(docker load -i /tmp/sandbox_image_amd64.tar) + if [ "${{ matrix.runtime_type }}" == "eventstream" ]; then + output=$(docker load -i /tmp/od_runtime_image_amd64.tar) + else + echo "No Runtime Docker image to load" + exit 1 + fi # Extract the first image name from the output image_name=$(echo "$output" | grep -oP 'Loaded image: \K.*' | head -n 1) @@ -167,48 +251,40 @@ jobs: # Print the full name of the image echo "Loaded Docker image: $image_name" - SANDBOX_CONTAINER_IMAGE=$image_name TEST_IN_CI=true TEST_ONLY=true ./tests/integration/regenerate.sh - + TEST_RUNTIME=${{ matrix.runtime_type }} SANDBOX_USER_ID=$(id -u) SANDBOX_CONTAINER_IMAGE=$image_name TEST_IN_CI=true TEST_ONLY=true ./tests/integration/regenerate.sh - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + # Push the OpenDevin and sandbox Docker images to the ghcr.io repository ghcr_push: runs-on: ubuntu-latest - # don't push if integration tests or sandbox tests fail - needs: [ghcr_build, integration-tests-on-linux, test-for-sandbox] + needs: [ghcr_build] if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') - env: tags: ${{ needs.ghcr_build.outputs.tags }} - permissions: contents: read packages: write - strategy: matrix: - image: ["sandbox", "opendevin"] - platform: ["amd64", "arm64"] - + image: ['opendevin'] + platform: ['amd64', 'arm64'] steps: 
- name: Checkout code uses: actions/checkout@v4 - - name: Login to GHCR uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Download Docker images uses: actions/download-artifact@v4 with: name: ${{ matrix.image }}-docker-image-${{ matrix.platform }} path: /tmp/${{ matrix.platform }} - - name: Load images and push to registry run: | mv /tmp/${{ matrix.platform }}/${{ matrix.image }}_image_${{ matrix.platform }}.tar . @@ -223,33 +299,124 @@ jobs: docker push $image_name:${tag}_${{ matrix.platform }} done + # Push the runtime Docker images to the ghcr.io repository + ghcr_push_runtime: + runs-on: ubuntu-latest + needs: [ghcr_build_runtime, test_runtime, runtime_integration_tests_on_linux] + if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') + env: + RUNTIME_TAGS: ${{ needs.ghcr_build_runtime.outputs.tags }} + permissions: + contents: read + packages: write + strategy: + matrix: + image: ['od_runtime'] + platform: ['amd64', 'arm64'] + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + tool-cache: true + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: false + swap-storage: true + - name: Login to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Download Docker images + uses: actions/download-artifact@v4 + with: + name: ${{ matrix.image }}-docker-image-${{ matrix.platform }} + path: /tmp/${{ matrix.platform }} + - name: List downloaded files + run: | + ls -la /tmp/${{ matrix.platform }} + file /tmp/${{ matrix.platform }}/* + - name: Load images and push to registry + run: | + mv /tmp/${{ matrix.platform }}/${{ matrix.image }}_image_${{ matrix.platform }}.tar ./${{ matrix.image }}_image_${{ matrix.platform }}.tar + if ! 
loaded_image=$(docker load -i ${{ matrix.image }}_image_${{ matrix.platform }}.tar | grep "Loaded image:" | head -n 1 | awk '{print $3}'); then + echo "Failed to load Docker image" + exit 1 + fi + echo "loaded image = $loaded_image" + image_name=$(echo "ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}" | tr '[:upper:]' '[:lower:]') + echo "image name = $image_name" + echo "$RUNTIME_TAGS" | tr ' ' '\n' | while read -r tag; do + echo "tag = $tag" + if [ -n "$image_name" ] && [ -n "$tag" ]; then + docker tag $loaded_image $image_name:${tag}_${{ matrix.platform }} + docker push $image_name:${tag}_${{ matrix.platform }} + else + echo "Skipping tag and push due to empty image_name or tag" + fi + done + + # Creates and pushes the OpenDevin and sandbox Docker image manifests create_manifest: runs-on: ubuntu-latest needs: [ghcr_build, ghcr_push] if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') - env: tags: ${{ needs.ghcr_build.outputs.tags }} - strategy: matrix: - image: ["sandbox", "opendevin"] - + image: ['opendevin'] permissions: contents: read packages: write - steps: - name: Checkout code uses: actions/checkout@v4 - - name: Login to GHCR uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} + - name: Create and push multi-platform manifest + run: | + image_name=$(echo "ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}" | tr '[:upper:]' '[:lower:]') + echo "image name = $image_name" + tags=$(echo ${tags} | tr ' ' '\n') + for tag in $tags; do + echo 'tag = $tag' + docker buildx imagetools create --tag $image_name:$tag \ + $image_name:${tag}_amd64 \ + $image_name:${tag}_arm64 + done + # Creates and pushes the runtime Docker image manifest + create_manifest_runtime: + runs-on: ubuntu-latest + needs: [ghcr_build_runtime, ghcr_push_runtime] + if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/') + env: + tags: ${{ 
needs.ghcr_build_runtime.outputs.tags }} + strategy: + matrix: + image: ['od_runtime'] + permissions: + contents: read + packages: write + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Login to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Create and push multi-platform manifest run: | image_name=$(echo "ghcr.io/${{ github.repository_owner }}/${{ matrix.image }}" | tr '[:upper:]' '[:lower:]') diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 7233bdb25aee..98d905c6c3be 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,3 +1,4 @@ +# Workflow that runs lint on the frontend and python code name: Lint concurrency: @@ -11,27 +12,26 @@ on: pull_request: jobs: + # Run lint on the frontend code lint-frontend: name: Lint frontend runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Install Node.js 20 uses: actions/setup-node@v4 with: node-version: 20 - - name: Install dependencies run: | cd frontend npm install --frozen-lockfile - - name: Lint run: | cd frontend npm run lint + # Run lint on the python code lint-python: name: Lint python runs-on: ubuntu-latest diff --git a/.github/workflows/review-pr.yml b/.github/workflows/review-pr.yml index f38c65b8325d..6d7771e5701f 100644 --- a/.github/workflows/review-pr.yml +++ b/.github/workflows/review-pr.yml @@ -1,3 +1,4 @@ +# Workflow that uses OpenDevin to review a pull request. 
PR must be labeled 'review-this' name: Use OpenDevin to Review Pull Request on: @@ -22,16 +23,13 @@ jobs: run: | sudo apt-get install -y git gh git config --global --add safe.directory $PWD - - name: Checkout Repository uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.base.ref }} # check out the target branch - - name: Download Diff run: | curl -O "${{ github.event.pull_request.diff_url }}" -L - - name: Write Task File run: | echo "Your coworker wants to apply a pull request to this project." > task.txt @@ -45,19 +43,16 @@ jobs: echo "${{ github.event.pull_request.body }}" >> task.txt echo "" >> task.txt echo "Diff file is: ${{ github.event.pull_request.number }}.diff" >> task.txt - - name: Set up environment run: | curl -sSL https://install.python-poetry.org | python3 - export PATH="/github/home/.local/bin:$PATH" - poetry install --without evaluation + poetry install --without evaluation,llama-index poetry run playwright install --with-deps chromium - - name: Run OpenDevin env: LLM_API_KEY: ${{ secrets.LLM_API_KEY }} LLM_MODEL: ${{ vars.LLM_MODEL }} - SANDBOX_BOX_TYPE: ssh run: | # Append path to launch poetry export PATH="/github/home/.local/bin:$PATH" @@ -67,7 +62,6 @@ jobs: export WORKSPACE_BASE=$GITHUB_WORKSPACE echo -e "/exit\n" | poetry run python opendevin/core/main.py -i 50 -f task.txt rm task.txt - - name: Check if review file is non-empty id: check_file run: | @@ -76,7 +70,6 @@ jobs: echo "non_empty=true" >> $GITHUB_OUTPUT fi shell: bash - - name: Create PR review if file is non-empty env: GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/run-unit-tests.yml b/.github/workflows/run-unit-tests.yml index 5fb79fc1f9a7..fd3b9c23e0b6 100644 --- a/.github/workflows/run-unit-tests.yml +++ b/.github/workflows/run-unit-tests.yml @@ -1,3 +1,4 @@ +# Workflow that runs frontend and python unit tests name: Run Unit Tests concurrency: @@ -15,63 +16,52 @@ on: - 'evaluation/**' pull_request: -env: - PERSIST_SANDBOX : "false" jobs: + # Run 
frontend unit tests fe-test: runs-on: ubuntu-latest - strategy: matrix: node-version: [20] - steps: - name: Checkout uses: actions/checkout@v4 - - name: Set up Node.js uses: actions/setup-node@v4 with: node-version: ${{ matrix.node-version }} - - name: Install dependencies working-directory: ./frontend run: npm ci - - name: Run tests and collect coverage working-directory: ./frontend run: npm run test:coverage - - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} + # Run python unit tests on macOS test-on-macos: name: Test on macOS runs-on: macos-12 env: - INSTALL_DOCKER: "1" # Set to '0' to skip Docker installation + INSTALL_DOCKER: '1' # Set to '0' to skip Docker installation strategy: matrix: - python-version: ["3.11"] - + python-version: ['3.11'] steps: - uses: actions/checkout@v4 - - name: Install poetry via pipx run: pipx install poetry - - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: "poetry" - + cache: 'poetry' - name: Install Python dependencies using Poetry - run: poetry install - + run: poetry install --without evaluation,llama-index - name: Install & Start Docker if: env.INSTALL_DOCKER == '1' run: | @@ -120,47 +110,39 @@ jobs: # For testcontainers to find the Colima socket # https://github.com/abiosoft/colima/blob/main/docs/FAQ.md#cannot-connect-to-the-docker-daemon-at-unixvarrundockersock-is-the-docker-daemon-running sudo ln -sf $HOME/.colima/default/docker.sock /var/run/docker.sock - - name: Build Environment run: make build - - name: Run Tests - run: poetry run pytest --forked --cov=agenthub --cov=opendevin --cov-report=xml ./tests/unit -k "not test_sandbox.py and not test_runtime.py" - + run: poetry run pytest --forked --cov=agenthub --cov=opendevin --cov-report=xml ./tests/unit -k "not test_runtime.py" - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 env: CODECOV_TOKEN: ${{ 
secrets.CODECOV_TOKEN }} + + # Run python unit tests on Linux test-on-linux: name: Test on Linux runs-on: ubuntu-latest env: - INSTALL_DOCKER: "0" # Set to '0' to skip Docker installation + INSTALL_DOCKER: '0' # Set to '0' to skip Docker installation strategy: matrix: - python-version: ["3.11"] - + python-version: ['3.11'] steps: - uses: actions/checkout@v4 - - name: Install poetry via pipx run: pipx install poetry - - name: Set up Python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - cache: "poetry" - + cache: 'poetry' - name: Install Python dependencies using Poetry - run: poetry install --without evaluation - + run: poetry install --without evaluation,llama-index - name: Build Environment run: make build - - name: Run Tests - run: poetry run pytest --forked --cov=agenthub --cov=opendevin --cov-report=xml ./tests/unit -k "not test_sandbox.py and not test_runtime.py" - + run: poetry run pytest --forked --cov=agenthub --cov=opendevin --cov-report=xml ./tests/unit -k "not test_runtime.py" - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 env: diff --git a/.github/workflows/solve-issue.yml b/.github/workflows/solve-issue.yml index df965b95542e..8e075761ab2a 100644 --- a/.github/workflows/solve-issue.yml +++ b/.github/workflows/solve-issue.yml @@ -1,3 +1,4 @@ +# Workflow that uses OpenDevin to resolve a GitHub issue. 
Issue must be labeled 'solve-this' name: Use OpenDevin to Resolve GitHub Issue on: @@ -17,14 +18,11 @@ jobs: image: ghcr.io/opendevin/opendevin volumes: - /var/run/docker.sock:/var/run/docker.sock - steps: - name: install git, github cli run: apt-get install -y git gh - - name: Checkout Repository uses: actions/checkout@v4 - - name: Write Task File env: ISSUE_TITLE: ${{ github.event.issue.title }} @@ -35,22 +33,18 @@ jobs: echo "" >> task.txt echo "BODY:" >> task.txt echo "${ISSUE_BODY}" >> task.txt - - name: Set up environment run: | curl -sSL https://install.python-poetry.org | python3 - export PATH="/github/home/.local/bin:$PATH" - poetry install --without evaluation + poetry install --without evaluation,llama-index poetry run playwright install --with-deps chromium - - - name: Run OpenDevin env: ISSUE_TITLE: ${{ github.event.issue.title }} ISSUE_BODY: ${{ github.event.issue.body }} LLM_API_KEY: ${{ secrets.OPENAI_API_KEY }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - SANDBOX_BOX_TYPE: ssh run: | # Append path to launch poetry export PATH="/github/home/.local/bin:$PATH" @@ -58,7 +52,6 @@ jobs: export PYTHONPATH=$(pwd):$PYTHONPATH WORKSPACE_MOUNT_PATH=$GITHUB_WORKSPACE poetry run python ./opendevin/core/main.py -i 50 -f task.txt -d $GITHUB_WORKSPACE rm task.txt - - name: Setup Git, Create Branch, and Commit Changes run: | # Setup Git configuration @@ -84,7 +77,6 @@ jobs: # Push changes git push --set-upstream origin $BRANCH_NAME - - name: Fetch Default Branch env: GH_TOKEN: ${{ github.token }} @@ -93,7 +85,6 @@ jobs: DEFAULT_BRANCH=$(gh repo view --json defaultBranchRef --jq .defaultBranchRef.name) echo "Default branch is $DEFAULT_BRANCH" echo "DEFAULT_BRANCH=$DEFAULT_BRANCH" >> $GITHUB_ENV - - name: Generate PR env: GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index b7e48311e480..6897fc79adea 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -1,4 +1,6 @@ +# Workflow that 
marks issues and PRs with no activity for 30 days with "Stale" and closes them after 7 more days of no activity name: 'Close stale issues' + on: schedule: - cron: '30 1 * * *' @@ -9,21 +11,9 @@ jobs: steps: - uses: actions/stale@v9 with: - # Aggressively close issues that have been explicitly labeled `age-out` - any-of-labels: age-out - stale-issue-message: 'This issue is stale because it has been open for 7 days with no activity. Remove stale label or comment or this will be closed in 1 day.' - close-issue-message: 'This issue was closed because it has been stalled for over 7 days with no activity.' - stale-pr-message: 'This PR is stale because it has been open for 7 days with no activity. Remove stale label or comment or this will be closed in 1 days.' - close-pr-message: 'This PR was closed because it has been stalled for over 7 days with no activity.' - days-before-stale: 7 - days-before-close: 1 - - - uses: actions/stale@v9 - with: - # Be more lenient with other issues stale-issue-message: 'This issue is stale because it has been open for 30 days with no activity. Remove stale label or comment or this will be closed in 7 days.' - close-issue-message: 'This issue was closed because it has been stalled for over 30 days with no activity.' stale-pr-message: 'This PR is stale because it has been open for 30 days with no activity. Remove stale label or comment or this will be closed in 7 days.' - close-pr-message: 'This PR was closed because it has been stalled for over 30 days with no activity.' days-before-stale: 30 + close-issue-message: 'This issue was closed because it has been stalled for over 30 days with no activity.' + close-pr-message: 'This PR was closed because it has been stalled for over 30 days with no activity.' 
days-before-close: 7 diff --git a/.github/workflows/update-pyproject-version.yml b/.github/workflows/update-pyproject-version.yml deleted file mode 100644 index 24fa5429c557..000000000000 --- a/.github/workflows/update-pyproject-version.yml +++ /dev/null @@ -1,48 +0,0 @@ -name: Update pyproject.toml Version and Tags - -on: - release: - types: - - published - -jobs: - update-pyproject-and-tags: - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 0 # Fetch all history for all branches and tags - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.11" - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install toml - - - name: Get release tag - id: get_release_tag - run: echo "RELEASE_TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV - - - name: Update pyproject.toml with release tag - run: | - python -c " - import toml - with open('pyproject.toml', 'r') as f: - data = toml.load(f) - data['tool']['poetry']['version'] = '${{ env.RELEASE_TAG }}' - with open('pyproject.toml', 'w') as f: - toml.dump(data, f) - " - - - name: Commit and push pyproject.toml changes - uses: stefanzweifel/git-auto-commit-action@v4 - with: - commit_message: "Update pyproject.toml version to ${{ env.RELEASE_TAG }}" - branch: main - file_pattern: pyproject.toml diff --git a/.gitignore b/.gitignore index fbed82154901..cac17cdfa34f 100644 --- a/.gitignore +++ b/.gitignore @@ -169,6 +169,10 @@ evaluation/outputs evaluation/swe_bench/eval_workspace* evaluation/SWE-bench/data evaluation/webarena/scripts/webarena_env.sh +evaluation/bird/data +evaluation/gaia/data +evaluation/gorilla/data +evaluation/toolqa/data # frontend diff --git a/Makefile b/Makefile index 2e06fa01d2a8..a2d91d3527aa 100644 --- a/Makefile +++ b/Makefile @@ -23,9 +23,6 @@ RESET=$(shell tput -Txterm sgr0) build: @echo "$(GREEN)Building project...$(RESET)" @$(MAKE) -s check-dependencies -ifeq ($(INSTALL_DOCKER),) - 
@$(MAKE) -s pull-docker-image -endif @$(MAKE) -s install-python-dependencies @$(MAKE) -s install-frontend-dependencies @$(MAKE) -s install-pre-commit-hooks @@ -124,11 +121,6 @@ check-poetry: exit 1; \ fi -pull-docker-image: - @echo "$(YELLOW)Pulling Docker image...$(RESET)" - @docker pull $(DOCKER_IMAGE) - @echo "$(GREEN)Docker image pulled successfully.$(RESET)" - install-python-dependencies: @echo "$(GREEN)Installing Python dependencies...$(RESET)" @if [ -z "${TZ}" ]; then \ @@ -141,7 +133,7 @@ install-python-dependencies: export HNSWLIB_NO_NATIVE=1; \ poetry run pip install chroma-hnswlib; \ fi - @poetry install + @poetry install --without llama-index @if [ -f "/etc/manjaro-release" ]; then \ echo "$(BLUE)Detected Manjaro Linux. Installing Playwright dependencies...$(RESET)"; \ poetry run pip install playwright; \ @@ -195,42 +187,45 @@ build-frontend: @echo "$(YELLOW)Building frontend...$(RESET)" @cd frontend && npm run build -# Start backend +# Start backend server with auto-reload start-backend: @echo "$(YELLOW)Starting backend...$(RESET)" @poetry run uvicorn opendevin.server.listen:app --port $(BACKEND_PORT) --reload --reload-exclude "workspace/*" -# Start frontend +# Start frontend server start-frontend: @echo "$(YELLOW)Starting frontend...$(RESET)" - @cd frontend && VITE_BACKEND_HOST=$(BACKEND_HOST) VITE_FRONTEND_PORT=$(FRONTEND_PORT) npm run start + @if [ -n "$$WSL_DISTRO_NAME" ]; then \ + mode=dev_wsl; \ + else \ + mode=start; \ + fi; \ + cd frontend && VITE_BACKEND_HOST=$(BACKEND_HOST) VITE_FRONTEND_PORT=$(FRONTEND_PORT) npm run $$mode -# Common setup for running the app (non-callable) -_run_setup: +# check for Windows (non-callable) +_run_check: @if [ "$(OS)" = "Windows_NT" ]; then \ echo "$(RED) Windows is not supported, use WSL instead!$(RESET)"; \ exit 1; \ fi @mkdir -p logs - @echo "$(YELLOW)Starting backend server...$(RESET)" - @poetry run uvicorn opendevin.server.listen:app --port $(BACKEND_PORT) & - @echo "$(YELLOW)Waiting for the backend to 
start...$(RESET)" - @until nc -z localhost $(BACKEND_PORT); do sleep 0.1; done - @echo "$(GREEN)Backend started successfully.$(RESET)" -# Run the app (standard mode) +# Run the app in standard mode for end-users run: @echo "$(YELLOW)Running the app...$(RESET)" - @$(MAKE) -s _run_setup - @cd frontend && echo "$(BLUE)Starting frontend with npm...$(RESET)" && npm run start -- --port $(FRONTEND_PORT) + @$(MAKE) -s _run_check + @poetry run uvicorn opendevin.server.listen:app --port $(BACKEND_PORT) & + @echo "$(YELLOW)Waiting for the app to start...$(RESET)" + @until nc -z localhost $(BACKEND_PORT); do sleep 0.1; done + @echo "$(GREEN)Application started successfully.$(RESET)" + +# Start both backend and frontend servers +start: + @echo "$(YELLOW)Start the app in dev mode...$(RESET)" + @$(MAKE) -s start-backend + @$(MAKE) -s start-frontend @echo "$(GREEN)Application started successfully.$(RESET)" -# Run the app (WSL mode) -run-wsl: - @echo "$(YELLOW)Running the app in WSL mode...$(RESET)" - @$(MAKE) -s _run_setup - @cd frontend && echo "$(BLUE)Starting frontend with npm (WSL mode)...$(RESET)" && npm run dev_wsl -- --port $(FRONTEND_PORT) - @echo "$(GREEN)Application started successfully in WSL mode.$(RESET)" # Setup config.toml setup-config: @@ -246,16 +241,6 @@ setup-config-prompts: workspace_dir=$${workspace_dir:-$(DEFAULT_WORKSPACE_DIR)}; \ echo "workspace_base=\"$$workspace_dir\"" >> $(CONFIG_FILE).tmp - @read -p "Do you want to persist the sandbox container? 
[true/false] [default: false]: " persist_sandbox; \ - persist_sandbox=$${persist_sandbox:-false}; \ - if [ "$$persist_sandbox" = "true" ]; then \ - read -p "Enter a password for the sandbox container: " ssh_password; \ - echo "ssh_password=\"$$ssh_password\"" >> $(CONFIG_FILE).tmp; \ - echo "persist_sandbox=$$persist_sandbox" >> $(CONFIG_FILE).tmp; \ - else \ - echo "persist_sandbox=$$persist_sandbox" >> $(CONFIG_FILE).tmp; \ - fi - @echo "" >> $(CONFIG_FILE).tmp @echo "[llm]" >> $(CONFIG_FILE).tmp @@ -301,6 +286,16 @@ clean: @rm -rf opendevin/.cache @echo "$(GREEN)Caches cleaned up successfully.$(RESET)" +# Kill all processes on port BACKEND_PORT and FRONTEND_PORT +kill: + @echo "$(YELLOW)Killing all processes on port $(BACKEND_PORT) and $(FRONTEND_PORT)...$(RESET)" + ports=$$(lsof -t -i:$(BACKEND_PORT) -i:$(FRONTEND_PORT)); \ + if [ -n "$$ports" ]; then \ + kill -9 $$ports; \ + echo "$(GREEN)Processes killed successfully.$(RESET)"; \ + else \ + echo "$(BLUE)No processes found on port $(BACKEND_PORT) and $(FRONTEND_PORT).$(RESET)"; \ + fi # Help help: @echo "$(BLUE)Usage: make [target]$(RESET)" @@ -309,11 +304,14 @@ help: @echo " $(GREEN)lint$(RESET) - Run linters on the project." @echo " $(GREEN)setup-config$(RESET) - Setup the configuration for OpenDevin by providing LLM API key," @echo " LLM Model name, and workspace directory." - @echo " $(GREEN)start-backend$(RESET) - Start the backend server for the OpenDevin project." + @echo " $(GREEN)start-backend$(RESET) - Start the backend server for the OpenDevin project with auto-reload." @echo " $(GREEN)start-frontend$(RESET) - Start the frontend server for the OpenDevin project." - @echo " $(GREEN)run$(RESET) - Run the OpenDevin application, starting both backend and frontend servers." + @echo " $(GREEN)start$(RESET) - Start both backend and frontend servers." + @echo " $(GREEN)run$(RESET) - Run the OpenDevin application for end-users." 
+ @echo " $(GREEN)run-wsl$(RESET) - Run the OpenDevin application, starting both backend and frontend servers for WSL users." + @echo " $(GREEN)kill$(RESET) - Kill all processes on port 3000 and 3001." @echo " Backend Log file will be stored in the 'logs' directory." @echo " $(GREEN)help$(RESET) - Display this help message, providing information on available targets." # Phony targets -.PHONY: build check-dependencies check-python check-npm check-docker check-poetry pull-docker-image install-python-dependencies install-frontend-dependencies install-pre-commit-hooks lint start-backend start-frontend run run-wsl setup-config setup-config-prompts help +.PHONY: build check-dependencies check-python check-npm check-docker check-poetry install-python-dependencies install-frontend-dependencies install-pre-commit-hooks lint start-backend start-frontend start run run-wsl setup-config setup-config-prompts kill help diff --git a/README.md b/README.md index e2232ef19d5c..c2b884244809 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,35 @@ +The easiest way to run Kevin is to [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://github.com/codespaces/new/SmartManoj/Kevin) ๐Ÿ”ฅ๐Ÿ”ฅ๐Ÿ”ฅ + +The vision is to leverage SLMs effectively and work towards solving most of the issues on the SWE-Bench Lite evaluation. 
+ +### Kevin Changelogs: + + 1) [Added Auto Mode](https://github.com/OpenDevin/OpenDevin/pull/2782) ๐Ÿ”ฅ๐Ÿ”ฅ๐Ÿ”ฅ + 2) [Restarted Jupyter kernel if package installed via bash too](https://github.com/OpenDevin/OpenDevin/pull/3178) ๐Ÿ‘ + 3) [Cleaned Browser Observations](https://github.com/OpenDevin/OpenDevin/pull/3096) ๐Ÿงน + 4) [Showed relevant error in UI](https://github.com/OpenDevin/OpenDevin/pull/2657) ๐Ÿšจ + 5) [Added Event History Condenser](https://github.com/OpenDevin/OpenDevin/pull/2937) ๐Ÿ“œ + 6) [Feat: Persist sandbox for Event Runtime](https://github.com/SmartManoj/Kevin/commit/2200b21dd01ecf3618d7e676cf16f875c5fce154) ๐Ÿฅณ๐Ÿฅณ + 7) [Parsed pip output and restarted kernel automatically (for bash too)](https://github.com/SmartManoj/Kevin/commit/3b77d5b2ec592e0fcb5bd7ed8a0d5787378bc0de) ๐Ÿ“ฆ + 8) [Added editable address bar in browser tab](https://github.com/OpenDevin/OpenDevin/pull/3078) ๐ŸŒ + 9) [Include workspace contents if any at first step only.](https://github.com/OpenDevin/OpenDevin/pull/2865#issuecomment-2257487634) ๐Ÿ“‚ + 10) [Add start and kill modes in Makefile](https://github.com/OpenDevin/OpenDevin/pull/2850) ๐Ÿ“ณ + +### Bug Fixes: + 1) [Fixed GroqException - content must be a string for role system & assistant](https://github.com/SmartManoj/Kevin/commit/30c98d458a299d789ebd6b8ada842c050bc91b20) ๐Ÿ› ๏ธ + 2) [Fixed GroqException - condense' is unsupported](https://github.com/SmartManoj/Kevin/commit/1ece04784beb657dccbf615b3085e72f23a73e77) ๐Ÿ› ๏ธ + 3) [Clear history when starting a new task](https://github.com/SmartManoj/Kevin/commit/f874e13fdd4ea50dcd0d8484639de40a1d6f66f4) ๐Ÿงน + 4) [Add miniforge path to synchronize bash and notebook](https://github.com/SmartManoj/Kevin/commit/6753d8b2b2b4e5a753cc4b3e26982d36464b6002) ๐Ÿ›ฃ๏ธ + 5) [Fixed frontend terminal prompt](https://github.com/SmartManoj/Kevin/commit/77950625b51a779b99533a9af616c97e640d5cd6) ๐Ÿ› ๏ธ + 6) [Set TERM variable in 
bash](https://github.com/SmartManoj/Kevin/commit/ec84c3b633ac23effac9f096a68560abc7388d2f) ๐Ÿ› ๏ธ + +### Minor Changes: + 1) [Notify after task is finished](https://github.com/SmartManoj/Kevin/commit/cec8e7d9af109efc6abb099e2f9ac5b42b6650f6) ๐Ÿ“ข + +### Separate Feature Branches: + 1) [Added Tutor Agent](https://github.com/SmartManoj/Kevin/tree/add-tutor-agent) ๐Ÿง‘โ€๐Ÿซ +--- + B[OpenDevin Backend] + B -->|Builds| C[OD Runtime Image] + C -->|Launches| D[Runtime Client] + D -->|Initializes| E[Browser] + D -->|Initializes| F[Bash Shell] + D -->|Initializes| G[Plugins] + G -->|Initializes| L[Jupyter Server] + + B -->|Spawn| H[Agent] + B -->|Spawn| I[EventStream] + I <--->|Execute Action to + Get Observation + via REST API + | D + + H -->|Generate Action| I + I -->|Obtain Observation| H + + subgraph "Docker Container" + D + E + F + G + L + end +``` + +1. User Input: The user provides a custom base Docker image. + +2. Image Building: OpenDevin builds a new Docker image (the "OD runtime image") based on the user-provided image. This new image includes OpenDevin-specific code, primarily the "runtime client." + +3. Container Launch: When OpenDevin starts, it launches a Docker container using the OD runtime image. + +4. Client Initialization: The runtime client initializes inside the container, setting up necessary components like a bash shell and loading any specified plugins. + +5. Communication: The OpenDevin backend (`runtime.py`) communicates with the runtime client over RESTful API, sending actions and receiving observations. + +6. Action Execution: The runtime client receives actions from the backend, executes them in the sandboxed environment, and sends back observations. + +7. Observation Return: The client sends execution results back to the OpenDevin backend as observations. + + +The role of the client is crucial: +- It acts as an intermediary between the OpenDevin backend and the sandboxed environment. 
+- It executes various types of actions (shell commands, file operations, Python code, etc.) safely within the container. +- It manages the state of the sandboxed environment, including the current working directory and loaded plugins. +- It formats and returns observations to the backend, ensuring a consistent interface for processing results. + + +## Advanced: How OpenDevin builds and maintains OD Runtime images + +OpenDevin uses a sophisticated approach to build and manage runtime images. This process ensures efficiency, consistency, and flexibility in creating and maintaining Docker images for both production and development environments. + +Check out [relevant code](https://github.com/OpenDevin/OpenDevin/blob/main/opendevin/runtime/utils/runtime_build.py) if you are interested in more details. + +### Image Tagging System + +OpenDevin uses a dual-tagging system for its runtime images to balance reproducibility with flexibility: + +1. Hash-based tag: `{target_image_repo}:{target_image_hash_tag}` + Example: `od_runtime:abc123def456` + + - This tag is based on the MD5 hash of the Docker build folder, which includes the source code (of runtime client and related dependencies) and Dockerfile. + - Identical hash tags guarantee that the images were built with exactly the same source code and Dockerfile. + - This ensures reproducibility: the same hash always means the same image contents. + +2. Generic tag: `{target_image_repo}:{target_image_tag}` + Example: `od_runtime:od_v0.8.3_ubuntu_tag_22.04` + + - This tag follows the format: `od_runtime:od_v{OD_VERSION}_{BASE_IMAGE_NAME}_tag_{BASE_IMAGE_TAG}` + - It represents the latest build for a particular base image and OpenDevin version combination. + - This tag is updated whenever a new image is built from the same base image, even if the source code changes. + +The hash-based tag ensures exact reproducibility, while the generic tag provides a stable reference to the latest version of a particular configuration. 
This dual-tagging approach allows OpenDevin to efficiently manage both development and production environments. + +### Build Process + +1. Image Naming Convention: + - Hash-based tag: `{target_image_repo}:{target_image_hash_tag}` + Example: `od_runtime:abc123def456` + - Generic tag: `{target_image_repo}:{target_image_tag}` + Example: `od_runtime:od_v0.8.3_ubuntu_tag_22.04` + +2. Build Process: + - a. Convert the base image name to an OD runtime image name. + Example: `ubuntu:22.04` -> `od_runtime:od_v0.8.3_ubuntu_tag_22.04` + - b. Generate a build context (Dockerfile and OpenDevin source code) and calculate its hash. + - c. Check for an existing image with the calculated hash. + - d. If not found, check for a recent compatible image to use as a base. + - e. If no compatible image exists, build from scratch using the original base image. + - f. Tag the new image with both hash-based and generic tags. + +3. Image Reuse and Rebuilding Logic: + The system follows these steps to determine whether to build a new image or use an existing one from a user-provided (base) image (e.g., `ubuntu:22.04`): + + a. If an image exists with the same hash (e.g., `od_runtime:abc123def456`), it will be reused as is. + + b. If the exact hash is not found, the system will try to rebuild using the latest generic image (e.g., `od_runtime:od_v0.8.3_ubuntu_tag_22.04`) as a base. This saves time by leveraging existing dependencies. + + c. If neither the hash-tagged nor the generic-tagged image is found, the system will build the image completely from scratch. + +4. Caching and Efficiency: + - The system attempts to reuse existing images when possible to save build time. + - If an exact match (by hash) is found, it's used without rebuilding. + - If a compatible image is found, it's used as a base for rebuilding, saving time on dependency installation. 
+ +Here's a flowchart illustrating the build process: + +```mermaid +flowchart TD + A[Start] --> B{Convert base image name} + B --> |ubuntu:22.04 -> od_runtime:od_v0.8.3_ubuntu_tag_22.04| C[Generate build context and hash] + C --> D{Check for existing image with hash} + D -->|Found od_runtime:abc123def456| E[Use existing image] + D -->|Not found| F{Check for od_runtime:od_v0.8.3_ubuntu_tag_22.04} + F -->|Found| G[Rebuild based on recent image] + F -->|Not found| H[Build from scratch] + G --> I[Tag with hash and generic tags] + H --> I + E --> J[End] + I --> J +``` + +This approach ensures that: + +1. Identical source code and Dockerfile always produce the same image (via hash-based tags). +2. The system can quickly rebuild images when minor changes occur (by leveraging recent compatible images). +3. The generic tag (e.g., `od_runtime:od_v0.8.3_ubuntu_tag_22.04`) always points to the latest build for a particular base image and OpenDevin version combination. + +By using this method, OpenDevin maintains an efficient and flexible system for building and managing runtime images, adapting to both development needs and production requirements. + + +## Advanced: Runtime Plugin System + +The OpenDevin Runtime supports a plugin system that allows for extending functionality and customizing the runtime environment. Plugins are initialized when the runtime client starts up. + +Check [an example of Jupyter plugin here](https://github.com/OpenDevin/OpenDevin/blob/9c44d94cef32e6426ebd8deeeb52963153b2348a/opendevin/runtime/plugins/jupyter/__init__.py#L30-L63) if you want to implement your own plugin. + +*More details about the Plugin system are still under construction - contributions are welcomed!* + +Key aspects of the plugin system: + +1. Plugin Definition: Plugins are defined as Python classes that inherit from a base `Plugin` class. + +2. Plugin Registration: Available plugins are registered in an `ALL_PLUGINS` dictionary. + +3. 
Plugin Specification: Plugins are associate with `Agent.sandbox_plugins: list[PluginRequirement]`. Users can specify which plugins to load when initializing the runtime. + +4. Initialization: Plugins are initialized asynchronously when the runtime client starts. + +5. Usage: The runtime client can use initialized plugins to extend its capabilities (e.g., the JupyterPlugin for running IPython cells). diff --git a/docs/package-lock.json b/docs/package-lock.json index edfb57026997..935c0614a685 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -11,6 +11,7 @@ "@docusaurus/core": "^3.4.0", "@docusaurus/plugin-content-pages": "^3.4.0", "@docusaurus/preset-classic": "^3.4.0", + "@docusaurus/theme-mermaid": "^3.4.0", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "prism-react-renderer": "^2.3.0", @@ -2083,6 +2084,11 @@ "node": ">=6.9.0" } }, + "node_modules/@braintree/sanitize-url": { + "version": "6.0.4", + "resolved": "https://registry.npmmirror.com/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz", + "integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==" + }, "node_modules/@colors/colors": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", @@ -2579,6 +2585,27 @@ "react-dom": "^18.0.0" } }, + "node_modules/@docusaurus/theme-mermaid": { + "version": "3.4.0", + "resolved": "https://registry.npmmirror.com/@docusaurus/theme-mermaid/-/theme-mermaid-3.4.0.tgz", + "integrity": "sha512-3w5QW0HEZ2O6x2w6lU3ZvOe1gNXP2HIoKDMJBil1VmLBc9PmpAG17VmfhI/p3L2etNmOiVs5GgniUqvn8AFEGQ==", + "dependencies": { + "@docusaurus/core": "3.4.0", + "@docusaurus/module-type-aliases": "3.4.0", + "@docusaurus/theme-common": "3.4.0", + "@docusaurus/types": "3.4.0", + "@docusaurus/utils-validation": "3.4.0", + "mermaid": "^10.4.0", + "tslib": "^2.6.0" + }, + "engines": { + "node": ">=18.0" + }, + "peerDependencies": { + "react": "^18.0.0", + "react-dom": "^18.0.0" + } + }, 
"node_modules/@docusaurus/theme-search-algolia": { "version": "3.4.0", "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.4.0.tgz", @@ -3291,6 +3318,24 @@ "@types/node": "*" } }, + "node_modules/@types/d3-scale": { + "version": "4.0.8", + "resolved": "https://registry.npmmirror.com/@types/d3-scale/-/d3-scale-4.0.8.tgz", + "integrity": "sha512-gkK1VVTr5iNiYJ7vWDI+yUFFlszhNMtVeneJ6lUTKPjprsvLLI9/tgEGiXJOnlINJA8FyA88gfnQsHbybVZrYQ==", + "dependencies": { + "@types/d3-time": "*" + } + }, + "node_modules/@types/d3-scale-chromatic": { + "version": "3.0.3", + "resolved": "https://registry.npmmirror.com/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.3.tgz", + "integrity": "sha512-laXM4+1o5ImZv3RpFAsTRn3TEkzqkytiOY0Dz0sq5cnd1dtNlk6sHLon4OvqaiJb28T0S/TdsBI3Sjsy+keJrw==" + }, + "node_modules/@types/d3-time": { + "version": "3.0.3", + "resolved": "https://registry.npmmirror.com/@types/d3-time/-/d3-time-3.0.3.tgz", + "integrity": "sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw==" + }, "node_modules/@types/debug": { "version": "4.1.12", "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", @@ -4976,6 +5021,14 @@ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" }, + "node_modules/cose-base": { + "version": "1.0.3", + "resolved": "https://registry.npmmirror.com/cose-base/-/cose-base-1.0.3.tgz", + "integrity": "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==", + "dependencies": { + "layout-base": "^1.0.0" + } + }, "node_modules/cosmiconfig": { "version": "8.3.6", "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz", @@ -5320,472 +5373,955 @@ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", "integrity": 
"sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==" }, - "node_modules/debounce": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", - "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" + "node_modules/cytoscape": { + "version": "3.30.1", + "resolved": "https://registry.npmmirror.com/cytoscape/-/cytoscape-3.30.1.tgz", + "integrity": "sha512-TRJc3HbBPkHd50u9YfJh2FxD1lDLZ+JXnJoyBn5LkncoeuT7fapO/Hq/Ed8TdFclaKshzInge2i30bg7VKeoPQ==", + "engines": { + "node": ">=0.10" + } }, - "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dependencies": { - "ms": "2.1.2" + "node_modules/cytoscape-cose-bilkent": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz", + "integrity": "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==", + "dependencies": { + "cose-base": "^1.0.0" + }, + "peerDependencies": { + "cytoscape": "^3.2.0" + } + }, + "node_modules/d3": { + "version": "7.9.0", + "resolved": "https://registry.npmmirror.com/d3/-/d3-7.9.0.tgz", + "integrity": "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==", + "dependencies": { + "d3-array": "3", + "d3-axis": "3", + "d3-brush": "3", + "d3-chord": "3", + "d3-color": "3", + "d3-contour": "4", + "d3-delaunay": "6", + "d3-dispatch": "3", + "d3-drag": "3", + "d3-dsv": "3", + "d3-ease": "3", + "d3-fetch": "3", + "d3-force": "3", + "d3-format": "3", + "d3-geo": "3", + "d3-hierarchy": "3", + "d3-interpolate": "3", + "d3-path": "3", + "d3-polygon": "3", + "d3-quadtree": "3", + "d3-random": "3", + "d3-scale": "4", + "d3-scale-chromatic": "3", + 
"d3-selection": "3", + "d3-shape": "3", + "d3-time": "3", + "d3-time-format": "4", + "d3-timer": "3", + "d3-transition": "3", + "d3-zoom": "3" }, "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } + "node": ">=12" } }, - "node_modules/decode-named-character-reference": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", - "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", + "node_modules/d3-array": { + "version": "3.2.4", + "resolved": "https://registry.npmmirror.com/d3-array/-/d3-array-3.2.4.tgz", + "integrity": "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==", "dependencies": { - "character-entities": "^2.0.0" + "internmap": "1 - 2" }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "engines": { + "node": ">=12" } }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "node_modules/d3-axis": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/d3-axis/-/d3-axis-3.0.0.tgz", + "integrity": "sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/d3-brush": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/d3-brush/-/d3-brush-3.0.0.tgz", + "integrity": "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==", "dependencies": { - "mimic-response": "^3.1.0" + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "3", + "d3-transition": "3" }, 
"engines": { - "node": ">=10" + "node": ">=12" + } + }, + "node_modules/d3-chord": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-chord/-/d3-chord-3.0.1.tgz", + "integrity": "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==", + "dependencies": { + "d3-path": "1 - 3" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=12" } }, - "node_modules/decompress-response/node_modules/mimic-response": { + "node_modules/d3-color": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "resolved": "https://registry.npmmirror.com/d3-color/-/d3-color-3.1.0.tgz", + "integrity": "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==", "engines": { - "node": ">=10" + "node": ">=12" + } + }, + "node_modules/d3-contour": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/d3-contour/-/d3-contour-4.0.2.tgz", + "integrity": "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==", + "dependencies": { + "d3-array": "^3.2.0" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=12" } }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "node_modules/d3-delaunay": { + "version": "6.0.4", + "resolved": "https://registry.npmmirror.com/d3-delaunay/-/d3-delaunay-6.0.4.tgz", + "integrity": "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==", + "dependencies": { + "delaunator": "5" + }, "engines": { - "node": ">=4.0.0" + "node": 
">=12" } }, - "node_modules/deepmerge": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", - "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "node_modules/d3-dispatch": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-dispatch/-/d3-dispatch-3.0.1.tgz", + "integrity": "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==", "engines": { - "node": ">=0.10.0" + "node": ">=12" } }, - "node_modules/default-gateway": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", - "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "node_modules/d3-drag": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/d3-drag/-/d3-drag-3.0.0.tgz", + "integrity": "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==", "dependencies": { - "execa": "^5.0.0" + "d3-dispatch": "1 - 3", + "d3-selection": "3" }, "engines": { - "node": ">= 10" + "node": ">=12" } }, - "node_modules/defer-to-connect": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", - "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "node_modules/d3-dsv": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-dsv/-/d3-dsv-3.0.1.tgz", + "integrity": "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==", + "dependencies": { + "commander": "7", + "iconv-lite": "0.6", + "rw": "1" + }, + "bin": { + "csv2json": "bin/dsv2json.js", + "csv2tsv": "bin/dsv2dsv.js", + "dsv2dsv": "bin/dsv2dsv.js", + "dsv2json": "bin/dsv2json.js", + "json2csv": "bin/json2dsv.js", + "json2dsv": "bin/json2dsv.js", + 
"json2tsv": "bin/json2dsv.js", + "tsv2csv": "bin/dsv2dsv.js", + "tsv2json": "bin/dsv2json.js" + }, "engines": { - "node": ">=10" + "node": ">=12" } }, - "node_modules/define-data-property": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", - "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "node_modules/d3-dsv/node_modules/commander": { + "version": "7.2.0", + "resolved": "https://registry.npmmirror.com/commander/-/commander-7.2.0.tgz", + "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/d3-dsv/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", "dependencies": { - "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "gopd": "^1.0.1" + "safer-buffer": ">= 2.1.2 < 3.0.0" }, "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "node": ">=0.10.0" } }, - "node_modules/define-lazy-prop": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", - "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "node_modules/d3-ease": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-ease/-/d3-ease-3.0.1.tgz", + "integrity": "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w==", "engines": { - "node": ">=8" + "node": ">=12" } }, - "node_modules/define-properties": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", - "integrity": 
"sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "node_modules/d3-fetch": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-fetch/-/d3-fetch-3.0.1.tgz", + "integrity": "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==", "dependencies": { - "define-data-property": "^1.0.1", - "has-property-descriptors": "^1.0.0", - "object-keys": "^1.1.1" + "d3-dsv": "1 - 3" }, "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" + "node": ">=12" } }, - "node_modules/del": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", - "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "node_modules/d3-force": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/d3-force/-/d3-force-3.0.0.tgz", + "integrity": "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==", "dependencies": { - "globby": "^11.0.1", - "graceful-fs": "^4.2.4", - "is-glob": "^4.0.1", - "is-path-cwd": "^2.2.0", - "is-path-inside": "^3.0.2", - "p-map": "^4.0.0", - "rimraf": "^3.0.2", - "slash": "^3.0.0" + "d3-dispatch": "1 - 3", + "d3-quadtree": "1 - 3", + "d3-timer": "1 - 3" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=12" } }, - "node_modules/depd": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "node_modules/d3-format": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/d3-format/-/d3-format-3.1.0.tgz", + "integrity": "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==", "engines": { - "node": ">= 0.8" + "node": 
">=12" } }, - "node_modules/dequal": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", - "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "node_modules/d3-geo": { + "version": "3.1.1", + "resolved": "https://registry.npmmirror.com/d3-geo/-/d3-geo-3.1.1.tgz", + "integrity": "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==", + "dependencies": { + "d3-array": "2.5.0 - 3" + }, "engines": { - "node": ">=6" + "node": ">=12" } }, - "node_modules/destroy": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", - "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "node_modules/d3-hierarchy": { + "version": "3.1.2", + "resolved": "https://registry.npmmirror.com/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz", + "integrity": "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==", "engines": { - "node": ">= 0.8", - "npm": "1.2.8000 || >= 1.4.16" + "node": ">=12" } }, - "node_modules/detect-node": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", - "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" - }, - "node_modules/detect-port": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz", - "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==", + "node_modules/d3-interpolate": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-interpolate/-/d3-interpolate-3.0.1.tgz", + "integrity": "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==", "dependencies": { - "address": "^1.0.1", - "debug": "4" + "d3-color": "1 - 
3" }, - "bin": { - "detect": "bin/detect-port.js", - "detect-port": "bin/detect-port.js" + "engines": { + "node": ">=12" } }, - "node_modules/detect-port-alt": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", - "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", - "dependencies": { - "address": "^1.0.1", - "debug": "^2.6.0" - }, - "bin": { - "detect": "bin/detect-port", - "detect-port": "bin/detect-port" - }, + "node_modules/d3-path": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/d3-path/-/d3-path-3.1.0.tgz", + "integrity": "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==", "engines": { - "node": ">= 4.2.1" + "node": ">=12" } }, - "node_modules/detect-port-alt/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dependencies": { - "ms": "2.0.0" + "node_modules/d3-polygon": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-polygon/-/d3-polygon-3.0.1.tgz", + "integrity": "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==", + "engines": { + "node": ">=12" } }, - "node_modules/detect-port-alt/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" - }, - "node_modules/devlop": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", - "integrity": "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", - "dependencies": { - "dequal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": 
"https://github.com/sponsors/wooorm" + "node_modules/d3-quadtree": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-quadtree/-/d3-quadtree-3.0.1.tgz", + "integrity": "sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==", + "engines": { + "node": ">=12" } }, - "node_modules/dir-glob": { + "node_modules/d3-random": { "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dependencies": { - "path-type": "^4.0.0" - }, + "resolved": "https://registry.npmmirror.com/d3-random/-/d3-random-3.0.1.tgz", + "integrity": "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==", "engines": { - "node": ">=8" + "node": ">=12" } }, - "node_modules/dns-packet": { - "version": "5.6.1", - "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", - "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "node_modules/d3-sankey": { + "version": "0.12.3", + "resolved": "https://registry.npmmirror.com/d3-sankey/-/d3-sankey-0.12.3.tgz", + "integrity": "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==", "dependencies": { - "@leichtgewicht/ip-codec": "^2.0.1" - }, - "engines": { - "node": ">=6" + "d3-array": "1 - 2", + "d3-shape": "^1.2.0" } }, - "node_modules/dom-converter": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", - "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "node_modules/d3-sankey/node_modules/d3-array": { + "version": "2.12.1", + "resolved": "https://registry.npmmirror.com/d3-array/-/d3-array-2.12.1.tgz", + "integrity": 
"sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==", "dependencies": { - "utila": "~0.4" + "internmap": "^1.0.0" } }, - "node_modules/dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "node_modules/d3-sankey/node_modules/d3-path": { + "version": "1.0.9", + "resolved": "https://registry.npmmirror.com/d3-path/-/d3-path-1.0.9.tgz", + "integrity": "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==" + }, + "node_modules/d3-sankey/node_modules/d3-shape": { + "version": "1.3.7", + "resolved": "https://registry.npmmirror.com/d3-shape/-/d3-shape-1.3.7.tgz", + "integrity": "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==", "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + "d3-path": "1" } }, - "node_modules/domelementtype": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ] + "node_modules/d3-sankey/node_modules/internmap": { + "version": "1.0.1", + "resolved": "https://registry.npmmirror.com/internmap/-/internmap-1.0.1.tgz", + "integrity": "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==" }, - "node_modules/domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": 
"sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "node_modules/d3-scale": { + "version": "4.0.2", + "resolved": "https://registry.npmmirror.com/d3-scale/-/d3-scale-4.0.2.tgz", + "integrity": "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==", "dependencies": { - "domelementtype": "^2.3.0" + "d3-array": "2.10.0 - 3", + "d3-format": "1 - 3", + "d3-interpolate": "1.2.0 - 3", + "d3-time": "2.1.1 - 3", + "d3-time-format": "2 - 4" }, "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" + "node": ">=12" } }, - "node_modules/domutils": { + "node_modules/d3-scale-chromatic": { "version": "3.1.0", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", - "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "resolved": "https://registry.npmmirror.com/d3-scale-chromatic/-/d3-scale-chromatic-3.1.0.tgz", + "integrity": "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==", "dependencies": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3" + "d3-color": "1 - 3", + "d3-interpolate": "1 - 3" }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" + "engines": { + "node": ">=12" } }, - "node_modules/dot-case": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", - "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", - "dependencies": { - "no-case": "^3.0.4", - "tslib": "^2.0.3" + "node_modules/d3-selection": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/d3-selection/-/d3-selection-3.0.0.tgz", + "integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==", + "engines": { + "node": ">=12" 
} }, - "node_modules/dot-prop": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", - "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "node_modules/d3-shape": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/d3-shape/-/d3-shape-3.2.0.tgz", + "integrity": "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==", "dependencies": { - "is-obj": "^2.0.0" + "d3-path": "^3.1.0" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=12" } }, - "node_modules/dot-prop/node_modules/is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "node_modules/d3-time": { + "version": "3.1.0", + "resolved": "https://registry.npmmirror.com/d3-time/-/d3-time-3.1.0.tgz", + "integrity": "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q==", + "dependencies": { + "d3-array": "2 - 3" + }, "engines": { - "node": ">=8" + "node": ">=12" } }, - "node_modules/duplexer": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", - "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": 
"sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" - }, - "node_modules/electron-to-chromium": { - "version": "1.4.748", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.748.tgz", - "integrity": "sha512-VWqjOlPZn70UZ8FTKUOkUvBLeTQ0xpty66qV0yJcAGY2/CthI4xyW9aEozRVtuwv3Kpf5xTesmJUcPwuJmgP4A==" - }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" - }, - "node_modules/emojilib": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", - "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" - }, - "node_modules/emojis-list": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", - "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "node_modules/d3-time-format": { + "version": "4.1.0", + "resolved": "https://registry.npmmirror.com/d3-time-format/-/d3-time-format-4.1.0.tgz", + "integrity": "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg==", + "dependencies": { + "d3-time": "1 - 3" + }, "engines": { - "node": ">= 4" + "node": ">=12" } }, - "node_modules/emoticon": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.0.1.tgz", - "integrity": "sha512-dqx7eA9YaqyvYtUhJwT4rC1HIp82j5ybS1/vQ42ur+jBe17dJMwZE4+gvL1XadSFfxaPFFGt3Xsw+Y8akThDlw==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "node_modules/d3-timer": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-timer/-/d3-timer-3.0.1.tgz", + "integrity": 
"sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA==", + "engines": { + "node": ">=12" } }, - "node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "node_modules/d3-transition": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/d3-transition/-/d3-transition-3.0.1.tgz", + "integrity": "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==", + "dependencies": { + "d3-color": "1 - 3", + "d3-dispatch": "1 - 3", + "d3-ease": "1 - 3", + "d3-interpolate": "1 - 3", + "d3-timer": "1 - 3" + }, "engines": { - "node": ">= 0.8" + "node": ">=12" + }, + "peerDependencies": { + "d3-selection": "2 - 3" } }, - "node_modules/enhanced-resolve": { - "version": "5.16.0", - "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.16.0.tgz", - "integrity": "sha512-O+QWCviPNSSLAD9Ucn8Awv+poAkqn3T1XY5/N7kR7rQO9yfSGWkYZDwpJ+iKF7B8rxaQKWngSqACpgzeapSyoA==", + "node_modules/d3-zoom": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/d3-zoom/-/d3-zoom-3.0.0.tgz", + "integrity": "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==", "dependencies": { - "graceful-fs": "^4.2.4", - "tapable": "^2.2.0" + "d3-dispatch": "1 - 3", + "d3-drag": "2 - 3", + "d3-interpolate": "1 - 3", + "d3-selection": "2 - 3", + "d3-transition": "2 - 3" }, "engines": { - "node": ">=10.13.0" + "node": ">=12" } }, - "node_modules/entities": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", - "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": 
"https://github.com/fb55/entities?sponsor=1" + "node_modules/dagre-d3-es": { + "version": "7.0.10", + "resolved": "https://registry.npmmirror.com/dagre-d3-es/-/dagre-d3-es-7.0.10.tgz", + "integrity": "sha512-qTCQmEhcynucuaZgY5/+ti3X/rnszKZhEQH/ZdWdtP1tA/y3VoHJzcVrO9pjjJCNpigfscAtoUB5ONcd2wNn0A==", + "dependencies": { + "d3": "^7.8.2", + "lodash-es": "^4.17.21" } }, - "node_modules/error-ex": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", - "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "node_modules/dayjs": { + "version": "1.11.12", + "resolved": "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.12.tgz", + "integrity": "sha512-Rt2g+nTbLlDWZTwwrIXjy9MeiZmSDI375FvZs72ngxx8PDC6YXOeR3q5LAuPzjZQxhiWdRKac7RKV+YyQYfYIg==" + }, + "node_modules/debounce": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/debounce/-/debounce-1.2.1.tgz", + "integrity": "sha512-XRRe6Glud4rd/ZGQfiV1ruXSfbvfJedlV9Y6zOlP+2K04vBYiJEte6stfFkCP03aMnY5tsipamumUjL14fofug==" + }, + "node_modules/debug": { + "version": "4.3.4", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", + "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", "dependencies": { - "is-arrayish": "^0.2.1" + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } } }, - "node_modules/error-stack-parser": { - "version": "2.1.4", - "resolved": "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz", - "integrity": "sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==", + "node_modules/decode-named-character-reference": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz", + "integrity": 
"sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==", "dependencies": { - "stackframe": "^1.3.4" + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/es-define-property": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", - "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", "dependencies": { - "get-intrinsic": "^1.2.4" + "mimic-response": "^3.1.0" }, "engines": { - "node": ">= 0.4" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "node_modules/decompress-response/node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", "engines": { - "node": ">= 0.4" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/es-module-lexer": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.0.tgz", + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": 
"sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-gateway": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz", + "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==", + "dependencies": { + "execa": "^5.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/defer-to-connect": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", + "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==", + "engines": { + "node": ">=10" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-lazy-prop": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz", + "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==", + "engines": { + "node": ">=8" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": 
"https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/del": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz", + "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==", + "dependencies": { + "globby": "^11.0.1", + "graceful-fs": "^4.2.4", + "is-glob": "^4.0.1", + "is-path-cwd": "^2.2.0", + "is-path-inside": "^3.0.2", + "p-map": "^4.0.0", + "rimraf": "^3.0.2", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/delaunator": { + "version": "5.0.1", + "resolved": "https://registry.npmmirror.com/delaunator/-/delaunator-5.0.1.tgz", + "integrity": "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==", + "dependencies": { + "robust-predicates": "^3.0.2" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz", + "integrity": 
"sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/detect-node": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz", + "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==" + }, + "node_modules/detect-port": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz", + "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==", + "dependencies": { + "address": "^1.0.1", + "debug": "4" + }, + "bin": { + "detect": "bin/detect-port.js", + "detect-port": "bin/detect-port.js" + } + }, + "node_modules/detect-port-alt": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz", + "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==", + "dependencies": { + "address": "^1.0.1", + "debug": "^2.6.0" + }, + "bin": { + "detect": "bin/detect-port", + "detect-port": "bin/detect-port" + }, + "engines": { + "node": ">= 4.2.1" + } + }, + "node_modules/detect-port-alt/node_modules/debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/detect-port-alt/node_modules/ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": 
"sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/diff": { + "version": "5.2.0", + "resolved": "https://registry.npmmirror.com/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dns-packet": { + "version": "5.6.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.1.tgz", + "integrity": "sha512-l4gcSouhcgIKRvyy99RNVOgxXiicE+2jZoNmaNmZ6JXiGajBOJAesk1OBlJuM5k2c+eudGdLxDqXuPCKIj6kpw==", + "dependencies": { + "@leichtgewicht/ip-codec": "^2.0.1" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/dom-converter": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz", + "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==", + "dependencies": { + "utila": "~0.4" + } + }, + "node_modules/dom-serializer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", + "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", + "dependencies": { + "domelementtype": "^2.3.0", + "domhandler": "^5.0.2", + "entities": "^4.2.0" + }, + "funding": { + "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" + } + }, + "node_modules/domelementtype": { + "version": "2.3.0", + 
"resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", + "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fb55" + } + ] + }, + "node_modules/domhandler": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", + "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", + "dependencies": { + "domelementtype": "^2.3.0" + }, + "engines": { + "node": ">= 4" + }, + "funding": { + "url": "https://github.com/fb55/domhandler?sponsor=1" + } + }, + "node_modules/dompurify": { + "version": "3.1.6", + "resolved": "https://registry.npmmirror.com/dompurify/-/dompurify-3.1.6.tgz", + "integrity": "sha512-cTOAhc36AalkjtBpfG6O8JimdTMWNXjiePT2xQH/ppBGi/4uIpmj8eKyIkMJErXWARyINV/sB38yf8JCLF5pbQ==" + }, + "node_modules/domutils": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz", + "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==", + "dependencies": { + "dom-serializer": "^2.0.0", + "domelementtype": "^2.3.0", + "domhandler": "^5.0.3" + }, + "funding": { + "url": "https://github.com/fb55/domutils?sponsor=1" + } + }, + "node_modules/dot-case": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz", + "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==", + "dependencies": { + "no-case": "^3.0.4", + "tslib": "^2.0.3" + } + }, + "node_modules/dot-prop": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-6.0.1.tgz", + "integrity": "sha512-tE7ztYzXHIeyvc7N+hR3oi7FIbf/NIjVP9hmAt3yMXzrQ072/fpjGLx2GxNxGxUl5V73MEqYzioOMoVhGMJ5cA==", + "dependencies": { + "is-obj": "^2.0.0" + }, + "engines": { + 
"node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/dot-prop/node_modules/is-obj": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==", + "engines": { + "node": ">=8" + } + }, + "node_modules/duplexer": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz", + "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==" + }, + "node_modules/electron-to-chromium": { + "version": "1.4.748", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.748.tgz", + "integrity": "sha512-VWqjOlPZn70UZ8FTKUOkUvBLeTQ0xpty66qV0yJcAGY2/CthI4xyW9aEozRVtuwv3Kpf5xTesmJUcPwuJmgP4A==" + }, + "node_modules/elkjs": { + "version": "0.9.3", + "resolved": "https://registry.npmmirror.com/elkjs/-/elkjs-0.9.3.tgz", + "integrity": "sha512-f/ZeWvW/BCXbhGEf1Ujp29EASo/lk1FDnETgNKwJrsVvGZhUWCZyg3xLJjAsxfOmt8KjswHmI5EwCQcPMpOYhQ==" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==" + }, + "node_modules/emojilib": { + "version": "2.4.0", + "resolved": 
"https://registry.npmjs.org/emojilib/-/emojilib-2.4.0.tgz", + "integrity": "sha512-5U0rVMU5Y2n2+ykNLQqMoqklN9ICBT/KsvC1Gz6vqHbz2AXXGkG+Pm5rMWk/8Vjrr/mY9985Hi8DYzn1F09Nyw==" + }, + "node_modules/emojis-list": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==", + "engines": { + "node": ">= 4" + } + }, + "node_modules/emoticon": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-4.0.1.tgz", + "integrity": "sha512-dqx7eA9YaqyvYtUhJwT4rC1HIp82j5ybS1/vQ42ur+jBe17dJMwZE4+gvL1XadSFfxaPFFGt3Xsw+Y8akThDlw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.16.0", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.16.0.tgz", + "integrity": "sha512-O+QWCviPNSSLAD9Ucn8Awv+poAkqn3T1XY5/N7kR7rQO9yfSGWkYZDwpJ+iKF7B8rxaQKWngSqACpgzeapSyoA==", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": 
"sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/error-stack-parser": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/error-stack-parser/-/error-stack-parser-2.1.4.tgz", + "integrity": "sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ==", + "dependencies": { + "stackframe": "^1.3.4" + } + }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.5.0.tgz", "integrity": "sha512-pqrTKmwEIgafsYZAGw9kszYzmagcE/n4dbgwGWLEXg7J4QFJVQRBld8j3Q3GNez79jzxZshq0bcT962QHOghjw==" }, "node_modules/escalade": { @@ -7539,6 +8075,14 @@ "css-in-js-utils": "^3.1.0" } }, + "node_modules/internmap": { + "version": "2.0.3", + "resolved": "https://registry.npmmirror.com/internmap/-/internmap-2.0.3.tgz", + "integrity": "sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==", + "engines": { + "node": ">=12" + } + }, "node_modules/interpret": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", @@ -7980,6 +8524,29 @@ "graceful-fs": "^4.1.6" } }, + "node_modules/katex": { + "version": "0.16.11", + "resolved": 
"https://registry.npmmirror.com/katex/-/katex-0.16.11.tgz", + "integrity": "sha512-RQrI8rlHY92OLf3rho/Ts8i/XvjgguEjOkO1BEXcU3N8BqPpSzBNwV/G0Ukr+P/l3ivvJUE/Fa/CwbS6HesGNQ==", + "funding": [ + "https://opencollective.com/katex", + "https://github.com/sponsors/katex" + ], + "dependencies": { + "commander": "^8.3.0" + }, + "bin": { + "katex": "cli.js" + } + }, + "node_modules/katex/node_modules/commander": { + "version": "8.3.0", + "resolved": "https://registry.npmmirror.com/commander/-/commander-8.3.0.tgz", + "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==", + "engines": { + "node": ">= 12" + } + }, "node_modules/keyv": { "version": "4.5.4", "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", @@ -7988,6 +8555,11 @@ "json-buffer": "3.0.1" } }, + "node_modules/khroma": { + "version": "2.1.0", + "resolved": "https://registry.npmmirror.com/khroma/-/khroma-2.1.0.tgz", + "integrity": "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==" + }, "node_modules/kind-of": { "version": "6.0.3", "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", @@ -8027,6 +8599,11 @@ "shell-quote": "^1.8.1" } }, + "node_modules/layout-base": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/layout-base/-/layout-base-1.0.2.tgz", + "integrity": "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==" + }, "node_modules/leven": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", @@ -8091,6 +8668,11 @@ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, + "node_modules/lodash-es": { + "version": "4.17.21", + "resolved": "https://registry.npmmirror.com/lodash-es/-/lodash-es-4.17.21.tgz", + "integrity": 
"sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw==" + }, "node_modules/lodash.debounce": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", @@ -8115,213 +8697,563 @@ "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/loose-envify": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", - "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lower-case": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", + "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "dependencies": { + "tslib": "^2.0.3" + } + }, + "node_modules/lowercase-keys": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", + "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", + "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", + "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", + "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", + "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-frontmatter": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", + "integrity": "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "dependencies": { + 
"@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "escape-string-regexp": "^5.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-extension-frontmatter": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", + "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.0.tgz", + "integrity": "sha512-FyzMsduZZHSc3i0Px3PQcBT4WJY/X/RCtEJKuybiC6sjPqLv7h1yqAkmILZtuxMSsUyaLUWNp71+vQH2zqp5cg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", + "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", + "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mdast-util-gfm-footnote": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", + "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": 
"sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", "dependencies": { - "js-tokens": "^3.0.0 || ^4.0.0" + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" }, - "bin": { - "loose-envify": "cli.js" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/lower-case": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz", - "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==", + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", "dependencies": { - "tslib": "^2.0.3" - } - }, - "node_modules/lowercase-keys": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-3.0.0.tgz", - "integrity": "sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==", - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", "dependencies": { - "yallist": "^3.0.2" + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/markdown-extensions": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", - "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", - "engines": { - "node": ">=16" + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/markdown-table": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz", - "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==", + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.0.tgz", + "integrity": "sha512-fGCu8eWdKUKNu5mohVGkhBXCXGnOTLuFqOvGMvdikr+J1w7lDJgxThOKpwRWzzbyXAU2hhSwsmssOY4yTokluw==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + 
"@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-directive": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.0.0.tgz", - "integrity": "sha512-JUpYOqKI4mM3sZcNxmF/ox04XYFFkNwr0CFlrQIkCwbvH0xzMCqkMqAde9wRd80VAhaUrwFwKm2nxretdT1h7Q==", + "node_modules/mdast-util-mdx-jsx": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.2.tgz", + "integrity": "sha512-eKMQDeywY2wlHc97k5eD8VC+9ASMjN8ItEZQNGwJ6E0XWKiW/Z0V5/H8pvoXUf+y+Mj0VIgeRRbujBmFn4FTyA==", "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", - "devlop": "^1.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", - "unist-util-visit-parents": "^6.0.0" + "unist-util-remove-position": "^5.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-find-and-replace": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.1.tgz", - "integrity": "sha512-SG21kZHGC3XRTSUhtofZkBzZTJNM5ecCi0SK2IMKmSXR8vO3peL+kb1O0z7Zl83jKtutG4k5Wv/W7V3/YHvzPA==", + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": 
"sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", "dependencies": { "@types/mdast": "^4.0.0", - "escape-string-regexp": "^5.0.0", - "unist-util-is": "^6.0.0", - "unist-util-visit-parents": "^6.0.0" + "unist-util-is": "^6.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-find-and-replace/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", - "engines": { - "node": ">=12" + "node_modules/mdast-util-to-hast": { + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" }, "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "type": "opencollective", + "url": 
"https://opencollective.com/unified" } }, - "node_modules/mdast-util-from-markdown": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", - "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", + "node_modules/mdast-util-to-markdown": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", + "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", - "decode-named-character-reference": "^1.0.0", - "devlop": "^1.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", "mdast-util-to-string": "^4.0.0", - "micromark": "^4.0.0", - "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0", - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0", - "unist-util-stringify-position": "^4.0.0" + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-from-markdown/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", - "funding": [ - { - "type": "GitHub Sponsors", - "url": "https://github.com/sponsors/unifiedjs" - }, - { - "type": "OpenCollective", - "url": "https://opencollective.com/unified" - } - ] - }, - "node_modules/mdast-util-frontmatter": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-frontmatter/-/mdast-util-frontmatter-2.0.1.tgz", - "integrity": 
"sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA==", + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "escape-string-regexp": "^5.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "micromark-extension-frontmatter": "^2.0.0" + "@types/mdast": "^4.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-frontmatter/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "node_modules/mdn-data": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", + "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/memfs": { + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", + "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "dependencies": { + "fs-monkey": "^1.0.4" + }, + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", + "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">= 8" } }, - "node_modules/mdast-util-gfm": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.0.0.tgz", - "integrity": "sha512-dgQEX5Amaq+DuUqf26jJqSK9qgixgd6rYDHAv4aTBuA92cTknZlKpPfa86Z/s8Dj8xsAQpFfBmPUHWJBWqS4Bw==", + "node_modules/mermaid": { + "version": "10.9.1", + "resolved": "https://registry.npmmirror.com/mermaid/-/mermaid-10.9.1.tgz", + "integrity": "sha512-Mx45Obds5W1UkW1nv/7dHRsbfMM1aOKA2+Pxs/IGHNonygDHwmng8xTHyS9z4KWVi0rbko8gjiBmuwwXQ7tiNA==", + "dependencies": { + "@braintree/sanitize-url": "^6.0.1", + "@types/d3-scale": "^4.0.3", + "@types/d3-scale-chromatic": "^3.0.0", + "cytoscape": "^3.28.1", + "cytoscape-cose-bilkent": "^4.1.0", + "d3": "^7.4.0", + "d3-sankey": "^0.12.3", + "dagre-d3-es": "7.0.10", + "dayjs": "^1.11.7", + "dompurify": "^3.0.5", + "elkjs": "^0.9.0", + "katex": "^0.16.9", + "khroma": "^2.0.0", + "lodash-es": "^4.17.21", + "mdast-util-from-markdown": "^1.3.0", + "non-layered-tidy-tree-layout": "^2.0.2", + "stylis": "^4.1.3", + "ts-dedent": "^2.2.0", + "uuid": "^9.0.0", + "web-worker": "^1.2.0" + } + }, + "node_modules/mermaid/node_modules/@types/mdast": { + "version": "3.0.15", + "resolved": 
"https://registry.npmmirror.com/@types/mdast/-/mdast-3.0.15.tgz", + "integrity": "sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/mermaid/node_modules/@types/unist": { + "version": "2.0.10", + "resolved": "https://registry.npmmirror.com/@types/unist/-/unist-2.0.10.tgz", + "integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==" + }, + "node_modules/mermaid/node_modules/mdast-util-from-markdown": { + "version": "1.3.1", + "resolved": "https://registry.npmmirror.com/mdast-util-from-markdown/-/mdast-util-from-markdown-1.3.1.tgz", + "integrity": "sha512-4xTO/M8c82qBcnQc1tgpNtubGUW/Y1tBQ1B0i5CtSoelOLKFYlElIr3bvgREYYO5iRqbMY1YuqZng0GVOI8Qww==", "dependencies": { - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-gfm-autolink-literal": "^2.0.0", - "mdast-util-gfm-footnote": "^2.0.0", - "mdast-util-gfm-strikethrough": "^2.0.0", - "mdast-util-gfm-table": "^2.0.0", - "mdast-util-gfm-task-list-item": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" + "@types/mdast": "^3.0.0", + "@types/unist": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "mdast-util-to-string": "^3.1.0", + "micromark": "^3.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-decode-string": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "unist-util-stringify-position": "^3.0.0", + "uvu": "^0.5.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-gfm-autolink-literal": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.0.tgz", - "integrity": "sha512-FyzMsduZZHSc3i0Px3PQcBT4WJY/X/RCtEJKuybiC6sjPqLv7h1yqAkmILZtuxMSsUyaLUWNp71+vQH2zqp5cg==", + 
"node_modules/mermaid/node_modules/mdast-util-to-string": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/mdast-util-to-string/-/mdast-util-to-string-3.2.0.tgz", + "integrity": "sha512-V4Zn/ncyN1QNSqSBxTrMOLpjr+IKdHl2v3KVLoWmDPscP4r9GcCi71gjgvUV1SFSKh92AjAG4peFuBl2/YgCJg==", "dependencies": { - "@types/mdast": "^4.0.0", - "ccount": "^2.0.0", - "devlop": "^1.0.0", - "mdast-util-find-and-replace": "^3.0.0", - "micromark-util-character": "^2.0.0" + "@types/mdast": "^3.0.0" }, "funding": { "type": "opencollective", "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-character": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.0.tgz", - "integrity": "sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ==", + "node_modules/mermaid/node_modules/micromark": { + "version": "3.2.0", + "resolved": "https://registry.npmmirror.com/micromark/-/micromark-3.2.0.tgz", + "integrity": "sha512-uD66tJj54JLYq0De10AhWycZWGQNUvDI55xPgk2sQM5kn1JYlhbCMTtEeT27+vAhW2FBQxLlOmS3pmA7/2z4aA==", "funding": [ { "type": "GitHub Sponsors", @@ -8333,14 +9265,29 @@ } ], "dependencies": { - "micromark-util-symbol": "^2.0.0", - "micromark-util-types": "^2.0.0" + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "micromark-core-commonmark": "^1.0.1", + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-combine-extensions": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-sanitize-uri": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": 
"^1.0.1", + "uvu": "^0.5.0" } }, - "node_modules/mdast-util-gfm-autolink-literal/node_modules/micromark-util-symbol": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz", - "integrity": "sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw==", + "node_modules/mermaid/node_modules/micromark-core-commonmark": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-core-commonmark/-/micromark-core-commonmark-1.1.0.tgz", + "integrity": "sha512-BgHO1aRbolh2hcrzL2d1La37V0Aoz73ymF8rAcKnohLy93titmv62E0gP8Hrx9PKcKrqCZ1BbLGbP3bEhoXYlw==", "funding": [ { "type": "GitHub Sponsors", @@ -8350,247 +9297,349 @@ "type": "OpenCollective", "url": "https://opencollective.com/unified" } - ] + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-factory-destination": "^1.0.0", + "micromark-factory-label": "^1.0.0", + "micromark-factory-space": "^1.0.0", + "micromark-factory-title": "^1.0.0", + "micromark-factory-whitespace": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-chunked": "^1.0.0", + "micromark-util-classify-character": "^1.0.0", + "micromark-util-html-tag-name": "^1.0.0", + "micromark-util-normalize-identifier": "^1.0.0", + "micromark-util-resolve-all": "^1.0.0", + "micromark-util-subtokenize": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.1", + "uvu": "^0.5.0" + } }, - "node_modules/mdast-util-gfm-footnote": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.0.0.tgz", - "integrity": "sha512-5jOT2boTSVkMnQ7LTrd6n/18kqwjmuYqo7JUPe+tRCY6O7dAuTFMtTPauYYrMPpox9hlN0uOx/FL8XvEfG9/mQ==", + "node_modules/mermaid/node_modules/micromark-factory-destination": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-factory-destination/-/micromark-factory-destination-1.1.0.tgz", + 
"integrity": "sha512-XaNDROBgx9SgSChd69pjiGKbV+nfHGDPVYFs5dOoDd7ZnMAE+Cuu91BCpsY8RT2NP9vo/B8pds2VQNCLiu0zhg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.1.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "micromark-util-normalize-identifier": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-gfm-strikethrough": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", - "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "node_modules/mermaid/node_modules/micromark-factory-label": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-factory-label/-/micromark-factory-label-1.1.0.tgz", + "integrity": "sha512-OLtyez4vZo/1NjxGhcpDSbHQ+m0IIGnT8BoPamh+7jVlzLJBH98zzuCoUeMxvM6WsNeh8wx8cKvqLiPHEACn0w==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/mdast": "^4.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" } }, - "node_modules/mdast-util-gfm-table": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", - "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "node_modules/mermaid/node_modules/micromark-factory-title": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-factory-title/-/micromark-factory-title-1.1.0.tgz", + "integrity": "sha512-J7n9R3vMmgjDOCY8NPw55jiyaQnH5kBdV2/UXCtZIpnHH3P6nHUKaH7XXEYuWwx/xUJcawa8plLBEjMPU24HzQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "markdown-table": "^3.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-gfm-task-list-item": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", - "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "node_modules/mermaid/node_modules/micromark-factory-whitespace": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-factory-whitespace/-/micromark-factory-whitespace-1.1.0.tgz", + "integrity": "sha512-v2WlmiymVSp5oMg+1Q0N1Lxmt6pMhIHD457whWM7/GUlEks1hI9xj5w3zbc4uuMKXGisksZk8DzP2UyGbGqNsQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/mdast": "^4.0.0", - "devlop": 
"^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-factory-space": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-mdx": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", - "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "node_modules/mermaid/node_modules/micromark-util-chunked": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-chunked/-/micromark-util-chunked-1.1.0.tgz", + "integrity": "sha512-Ye01HXpkZPNcV6FiyoW2fGZDUw4Yc7vT0E9Sad83+bEDiCJ1uXu0S3mr8WLpsz3HaG3x2q0HM6CTuPdcZcluFQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-mdx-expression": "^2.0.0", - "mdast-util-mdx-jsx": "^3.0.0", - "mdast-util-mdxjs-esm": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-mdx-expression": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.0.tgz", - "integrity": "sha512-fGCu8eWdKUKNu5mohVGkhBXCXGnOTLuFqOvGMvdikr+J1w7lDJgxThOKpwRWzzbyXAU2hhSwsmssOY4yTokluw==", + "node_modules/mermaid/node_modules/micromark-util-classify-character": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-classify-character/-/micromark-util-classify-character-1.1.0.tgz", + "integrity": 
"sha512-SL0wLxtKSnklKSUplok1WQFoGhUdWYKggKUiqhX+Swala+BtptGCu5iPRc+xvzJ4PXE/hwM3FNXsfEVgoZsWbw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-character": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-mdx-jsx": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.2.tgz", - "integrity": "sha512-eKMQDeywY2wlHc97k5eD8VC+9ASMjN8ItEZQNGwJ6E0XWKiW/Z0V5/H8pvoXUf+y+Mj0VIgeRRbujBmFn4FTyA==", + "node_modules/mermaid/node_modules/micromark-util-combine-extensions": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.1.0.tgz", + "integrity": "sha512-Q20sp4mfNf9yEqDL50WwuWZHUrCO4fEyeDCnMGmG5Pr0Cz15Uo7KBs6jq+dq0EgX4DPwwrh9m0X+zPV1ypFvUA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "ccount": "^2.0.0", - "devlop": "^1.1.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0", - "parse-entities": "^4.0.0", - "stringify-entities": "^4.0.0", - "unist-util-remove-position": "^5.0.0", - "unist-util-stringify-position": "^4.0.0", - "vfile-message": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": 
"https://opencollective.com/unified" + "micromark-util-chunked": "^1.0.0", + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-mdxjs-esm": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", - "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "node_modules/mermaid/node_modules/micromark-util-decode-numeric-character-reference": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.1.0.tgz", + "integrity": "sha512-m9V0ExGv0jB1OT21mrWcuf4QhP46pH1KkfWy9ZEezqHKAxkj4mPCy3nIH1rkbdMlChLHX531eOrymlwyZIf2iw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/estree-jsx": "^1.0.0", - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "devlop": "^1.0.0", - "mdast-util-from-markdown": "^2.0.0", - "mdast-util-to-markdown": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-phrasing": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", - "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "node_modules/mermaid/node_modules/micromark-util-decode-string": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-decode-string/-/micromark-util-decode-string-1.1.0.tgz", + "integrity": "sha512-YphLGCK8gM1tG1bd54azwyrQRjCFcmgj2S2GoJDNnh4vYtnL38JS8M4gpxzOPNyHdNEpheyWXCTnnTDY3N+NVQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" 
+ }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/mdast": "^4.0.0", - "unist-util-is": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^1.0.0", + "micromark-util-decode-numeric-character-reference": "^1.0.0", + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-to-hast": { - "version": "13.2.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", - "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "node_modules/mermaid/node_modules/micromark-util-encode": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-encode/-/micromark-util-encode-1.1.0.tgz", + "integrity": "sha512-EuEzTWSTAj9PA5GOAs992GzNh2dGQO52UvAbtSOMvXTxv3Criqb6IOzJUBCmEqrrXSblJIJBbFFv6zPxpreiJw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mermaid/node_modules/micromark-util-html-tag-name": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.2.0.tgz", + "integrity": "sha512-VTQzcuQgFUD7yYztuQFKXT49KghjtETQ+Wv/zUjGSGBioZnkA4P1XXZPT1FHeJA6RwRXSF47yvJ1tsJdoxwO+Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mermaid/node_modules/micromark-util-normalize-identifier": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.1.0.tgz", + "integrity": 
"sha512-N+w5vhqrBihhjdpM8+5Xsxy71QWqGn7HYNUvch71iV2PM7+E3uWGox1Qp90loa1ephtCxG2ftRV/Conitc6P2Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/hast": "^3.0.0", - "@types/mdast": "^4.0.0", - "@ungap/structured-clone": "^1.0.0", - "devlop": "^1.0.0", - "micromark-util-sanitize-uri": "^2.0.0", - "trim-lines": "^3.0.0", - "unist-util-position": "^5.0.0", - "unist-util-visit": "^5.0.0", - "vfile": "^6.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdast-util-to-markdown": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", - "integrity": "sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ==", + "node_modules/mermaid/node_modules/micromark-util-resolve-all": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-resolve-all/-/micromark-util-resolve-all-1.1.0.tgz", + "integrity": "sha512-b/G6BTMSg+bX+xVCshPTPyAu2tmA0E4X98NSR7eIbeC6ycCqCeE7wjfDIgzEbkzdEVJXRtOG4FbEm/uGbCRouA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/mdast": "^4.0.0", - "@types/unist": "^3.0.0", - "longest-streak": "^3.0.0", - "mdast-util-phrasing": "^4.0.0", - "mdast-util-to-string": "^4.0.0", - "micromark-util-decode-string": "^2.0.0", - "unist-util-visit": "^5.0.0", - "zwitch": "^2.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-types": "^1.0.0" } }, - "node_modules/mdast-util-to-string": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", - "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "node_modules/mermaid/node_modules/micromark-util-sanitize-uri": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.2.0.tgz", + "integrity": "sha512-QO4GXv0XZfWey4pYFndLUKEAktKkG5kZTdUNaTAkzbuJxn2tNBOr+QtxR2XpWaMhbImT2dPzyLrPXLlPhph34A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], "dependencies": { - "@types/mdast": "^4.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/unified" + "micromark-util-character": "^1.0.0", + "micromark-util-encode": "^1.0.0", + "micromark-util-symbol": "^1.0.0" } }, - "node_modules/mdn-data": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz", - "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow==" - }, - "node_modules/media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", - "engines": { - "node": ">= 0.6" + "node_modules/mermaid/node_modules/micromark-util-subtokenize": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-subtokenize/-/micromark-util-subtokenize-1.1.0.tgz", + "integrity": "sha512-kUQHyzRoxvZO2PuLzMt2P/dwVsTiivCK8icYTeR+3WgbuPqfHgPPy7nFKbeqRivBvn/3N3GBiNC+JRTMSxEC7A==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], 
+ "dependencies": { + "micromark-util-chunked": "^1.0.0", + "micromark-util-symbol": "^1.0.0", + "micromark-util-types": "^1.0.0", + "uvu": "^0.5.0" } }, - "node_modules/memfs": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz", - "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==", + "node_modules/mermaid/node_modules/micromark-util-types": { + "version": "1.1.0", + "resolved": "https://registry.npmmirror.com/micromark-util-types/-/micromark-util-types-1.1.0.tgz", + "integrity": "sha512-ukRBgie8TIAcacscVHSiddHjO4k/q3pnedmzMQ4iwDcK0FtFCohKOlFbaOL/mPgfnPsL3C1ZyxJa4sbWrBl3jg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mermaid/node_modules/unist-util-stringify-position": { + "version": "3.0.3", + "resolved": "https://registry.npmmirror.com/unist-util-stringify-position/-/unist-util-stringify-position-3.0.3.tgz", + "integrity": "sha512-k5GzIBZ/QatR8N5X2y+drfpWG8IDBzdnVj6OInRNWm1oXrzydiaAT2OQiA8DPRRZyAKb9b6I2a6PxYklZD0gKg==", "dependencies": { - "fs-monkey": "^1.0.4" + "@types/unist": "^2.0.0" }, - "engines": { - "node": ">= 4.0.0" + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" } }, - "node_modules/merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha512-cCi6g3/Zr1iqQi6ySbseM1Xvooa98N0w31jzUYrXPX2xqObmFGHJ0tQ5u74H3mVh7wLouTseZyYIq39g8cNp1w==" - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" - }, - "node_modules/merge2": { - "version": "1.4.1", - "resolved": 
"https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", - "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "engines": { - "node": ">= 8" + "node_modules/mermaid/node_modules/uuid": { + "version": "9.0.1", + "resolved": "https://registry.npmmirror.com/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", + "funding": [ + "https://github.com/sponsors/broofa", + "https://github.com/sponsors/ctavan" + ], + "bin": { + "uuid": "dist/bin/uuid" } }, "node_modules/methods": { @@ -10376,6 +11425,14 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/mri": { + "version": "1.2.0", + "resolved": "https://registry.npmmirror.com/mri/-/mri-1.2.0.tgz", + "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==", + "engines": { + "node": ">=4" + } + }, "node_modules/mrmime": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.0.tgz", @@ -10486,6 +11543,11 @@ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" }, + "node_modules/non-layered-tidy-tree-layout": { + "version": "2.0.2", + "resolved": "https://registry.npmmirror.com/non-layered-tidy-tree-layout/-/non-layered-tidy-tree-layout-2.0.2.tgz", + "integrity": "sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw==" + }, "node_modules/normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", @@ -12567,6 +13629,11 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/robust-predicates": { + "version": "3.0.2", + "resolved": "https://registry.npmmirror.com/robust-predicates/-/robust-predicates-3.0.2.tgz", + "integrity": 
"sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==" + }, "node_modules/rtl-css-js": { "version": "1.16.1", "resolved": "https://registry.npmjs.org/rtl-css-js/-/rtl-css-js-1.16.1.tgz", @@ -12619,6 +13686,22 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/rw": { + "version": "1.3.3", + "resolved": "https://registry.npmmirror.com/rw/-/rw-1.3.3.tgz", + "integrity": "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ==" + }, + "node_modules/sade": { + "version": "1.8.1", + "resolved": "https://registry.npmmirror.com/sade/-/sade-1.8.1.tgz", + "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==", + "dependencies": { + "mri": "^1.1.0" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -13699,6 +14782,14 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/ts-dedent": { + "version": "2.2.0", + "resolved": "https://registry.npmmirror.com/ts-dedent/-/ts-dedent-2.2.0.tgz", + "integrity": "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==", + "engines": { + "node": ">=6.10" + } + }, "node_modules/ts-easing": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/ts-easing/-/ts-easing-0.2.0.tgz", @@ -14195,6 +15286,31 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/uvu": { + "version": "0.5.6", + "resolved": "https://registry.npmmirror.com/uvu/-/uvu-0.5.6.tgz", + "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==", + "dependencies": { + "dequal": "^2.0.0", + "diff": "^5.0.0", + "kleur": "^4.0.3", + "sade": "^1.7.3" + }, + "bin": { + "uvu": "bin.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/uvu/node_modules/kleur": { + "version": "4.1.5", + "resolved": 
"https://registry.npmmirror.com/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "engines": { + "node": ">=6" + } + }, "node_modules/value-equal": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz", @@ -14277,6 +15393,11 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/web-worker": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/web-worker/-/web-worker-1.3.0.tgz", + "integrity": "sha512-BSR9wyRsy/KOValMgd5kMyr3JzpdeoR9KVId8u5GVlTTAtNChlsE4yTxeY7zMdNSyOmoKBv8NH2qeRY9Tg+IaA==" + }, "node_modules/webpack": { "version": "5.91.0", "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.91.0.tgz", diff --git a/docs/package.json b/docs/package.json index 8d3bcb970e46..a529476c4786 100644 --- a/docs/package.json +++ b/docs/package.json @@ -18,6 +18,7 @@ "@docusaurus/core": "^3.4.0", "@docusaurus/plugin-content-pages": "^3.4.0", "@docusaurus/preset-classic": "^3.4.0", + "@docusaurus/theme-mermaid": "^3.4.0", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "prism-react-renderer": "^2.3.0", diff --git a/docs/src/components/CustomFooter.tsx b/docs/src/components/CustomFooter.tsx index 36e12ac94aac..23b36d6ec28e 100644 --- a/docs/src/components/CustomFooter.tsx +++ b/docs/src/components/CustomFooter.tsx @@ -17,11 +17,9 @@ function CustomFooter() { -
- Community -
+
- + diff --git a/docs/src/components/Demo/Demo.tsx b/docs/src/components/Demo/Demo.tsx index a7493e34ce4a..25621bca5f15 100644 --- a/docs/src/components/Demo/Demo.tsx +++ b/docs/src/components/Demo/Demo.tsx @@ -6,7 +6,7 @@ export function Demo() { return (
- - Get Started - + +
+ + Code + + + Join our Slack community + + + Join our Discord community + + + + Paper on Arxiv + + + Evaluation Benchmark +
+
diff --git a/docs/src/components/Welcome/Welcome.tsx b/docs/src/components/Welcome/Welcome.tsx deleted file mode 100644 index bea684915247..000000000000 --- a/docs/src/components/Welcome/Welcome.tsx +++ /dev/null @@ -1,20 +0,0 @@ -import "../../css/welcome.css"; -import Translate from '@docusaurus/Translate'; - -export function Welcome() { - return ( -
-
- -

- - Welcome to OpenDevin, an open-source autonomous AI software engineer - that is capable of executing - complex engineering tasks and collaborating actively with users on - software development projects. - -

-
-
- ); -} diff --git a/docs/src/css/faq.css b/docs/src/css/faq.css deleted file mode 100644 index 07bfe7b3e0ed..000000000000 --- a/docs/src/css/faq.css +++ /dev/null @@ -1,66 +0,0 @@ -/* faq.css */ - -.faq-container { - margin: auto; - padding: 24px; - display: flex; - flex-direction: column; - gap: 8px; - margin-bottom: 24px; - } - - .faq-title { - display: flex; - align-items: center; - justify-content: center; - font-size: 2rem; - padding: 8px; - text-transform: uppercase; - font-weight: bold; - } - - @media (min-width: 1024px) { - .faq-title { - font-size: 6rem; - } - } - - .faq-section { - display: flex; - flex-direction: column; - gap: 8px; - width: 100%; - margin-bottom: 24px; - } - - .faq-section-title { - text-transform: uppercase; - font-weight: bold; - font-size: 2rem; - letter-spacing: 0.1em; - } - - .highlight { - font-weight: 600; - color: var(--logo); - } - - .faq-steps ol { - padding-left: 24px; - } - - .command-box { - display: flex; - flex-direction: column; - padding: 8px; - background-color: #e0e0e0; - border-radius: 0.375rem; - height: 6vh; - text-transform: uppercase; - color: #4a5568; - } - - .command-box + .command-box { - height: 8vh; - } - \ No newline at end of file diff --git a/docs/src/css/footer.css b/docs/src/css/footer.css index d17277367940..173ac6406711 100644 --- a/docs/src/css/footer.css +++ b/docs/src/css/footer.css @@ -3,12 +3,12 @@ .custom-footer { background-color: dark; color: white; - height: 25vh; + height: 200px; /* background: linear-gradient(to bottom, #1a1a1a, #1a1a1a); */ background: linear-gradient(to bottom, #1f2937, #000000); } - + .footer-content { display: flex; flex-direction: column; @@ -17,56 +17,55 @@ padding: 8px; height: 100%; } - + .footer-top { display: flex; gap: 8px; align-items: center; } - + .footer-title { font-weight: bold; font-size: 1.125rem; } - + @media (min-width: 768px) { .footer-title { font-size: 1.875rem; } } - + .footer-link a { font-size: 0.875rem; text-decoration: none; color: gray; 
transition: color 0.3s ease; } - + .footer-link a:hover { color: white; } - + .footer-community { text-transform: uppercase; font-weight: 300; } - + .footer-icons { display: flex; gap: 24px; font-size: 1.875rem; } - + .footer-icons a { color:gray; transition: color 0.3s ease; } - + .footer-icons a:hover { color: white; } - + .footer-bottom { text-transform: uppercase; } - \ No newline at end of file diff --git a/docs/src/css/homepageHeader.css b/docs/src/css/homepageHeader.css index 8e1b54336176..f8dd2003d763 100644 --- a/docs/src/css/homepageHeader.css +++ b/docs/src/css/homepageHeader.css @@ -1,36 +1,47 @@ /* homepageHeader.css */ .homepage-header { - height: 100vh; - color: white; - background: linear-gradient(to top, #64748b, #000000); - } - - .header-content { - display: flex; - flex-direction: column; - gap: 8px; - align-items: center; - padding: 24px; - font-weight: 300; - width: 100%; - } - + height: 800px; + color: white; + background: linear-gradient(to top, #64748b, #000000); +} + +.header-content { + display: flex; + flex-direction: column; + align-items: center; + padding: 2rem; + font-weight: 300; + width: 100%; +} + +.header-title { + font-size: 3rem; +} + +@media (min-width: 768px) { .header-title { - font-size: 3rem; - } - - @media (min-width: 768px) { - .header-title { - font-size: 5rem; - } + font-size: 4rem; } - - .header-subtitle { - font-size: 1.25rem; - } - - .header-buttons { - margin-top: 24px; - } - \ No newline at end of file +} + +.header-subtitle { + font-size: 1.5rem; +} + +.header-links { + display: flex; + flex-wrap: wrap; + justify-content: center; + gap: 10px; + max-width: 680px; +} + +.header-links a { + display: inline-block; + transition: transform 0.2s ease-in-out; +} + +.header-links a:hover { + transform: translateY(-2px); +} diff --git a/docs/src/css/welcome.css b/docs/src/css/welcome.css deleted file mode 100644 index 731cc87c1984..000000000000 --- a/docs/src/css/welcome.css +++ /dev/null @@ -1,53 +0,0 @@ -/* welcome.css */ 
- -.text-white { - color: white; - } - - .welcome-container { - display: flex; - justify-content: center; - align-items: center; - flex-direction: column; - background: linear-gradient(to bottom, #64748b, #1f2937); - } - - @media (min-width: 768px) { - .welcome-container { - flex-direction: row; - background: linear-gradient(to bottom, #64748b, #1f2937); - } - } - - .welcome-logo { - height: 45vh; - width: 45vw; - } - - @media (max-width: 640px) { - .welcome-logo { - height: 40vw; - width: 40vw; - } - } - - @media (min-width: 768px) { - .welcome-logo { - height: auto; - width: 350px; - } - } - - .welcome-text { - padding: 24px; - margin-bottom: 24px; - font-weight: 300; - font-size: 1.125rem; - } - - @media (min-width: 768px) { - .welcome-text { - padding: 8px; - font-size: 1.5rem; - } - } diff --git a/docs/src/pages/faq.tsx b/docs/src/pages/faq.tsx deleted file mode 100644 index c185e705f9ac..000000000000 --- a/docs/src/pages/faq.tsx +++ /dev/null @@ -1,129 +0,0 @@ -import Layout from '@theme/Layout'; -import '../css/faq.css'; -import Translate, { translate } from '@docusaurus/Translate'; - -export default function FAQ() { - const githubLink = ( - GitHub - ); - const discordLink = ( - Discord - ); - const slackLink = ( - Slack - ); - - return ( - -
-
- Frequently Asked Questions -
-
-
- What is OpenDevin? -
-

- OpenDevin{" "} - - is an autonomous software engineer that can solve software engineering - and web-browsing tasks end-to-end. It can perform data science queries, such - as "Find the number of pull requests to the OpenDevin repository in the last - month," and software engineering tasks, such as "Please add tests to this - file and verify that all the tests pass. If they don't fix the file." - -

-

- - At the same time, OpenDevin is a platform and community for agent developers - to test out and evaluate new agents. - -

-
-
-
- Support -
-
- - {`Please file a bug on {githubLink} if you notice a problem that likely affects others. If you're having trouble installing, or have general questions, reach out on {discordLink} or {slackLink}.`} - -
-
-
-
- How to fix a GitHub issue with OpenDevin? -
-
- - To fix an issue on GitHub using OpenDevin, send a prompt to OpenDevin asking it to follow - steps like the following: - -
    -
  1. Read the issue https://github.com/OpenDevin/OpenDevin/issues/1611
  2. -
  3. Clone the repository and check out a new branch
  4. -
  5. Based on the instructions in the issue description, modify files to fix the issue
  6. -
  7. Push the resulting output to GitHub using the GITHUB_TOKEN environment variable
  8. -
  9. Tell me the link that I need to go to to send a pull request
  10. -
- - Before you run OpenDevin, you can do: - -
- export SANDBOX_ENV_GITHUB_TOKEN=XXX -
- - where XXX is a GitHub token that you created that has permissions to push to the OpenDevin repo. If you donโ€™t have write permission to the OpenDevin repo, you might need to change that to: - -
- Push the resulting output to my fork at https://github.com/USERNAME/OpenDevin/ using the GITHUB_TOKEN environment variable -
- - where USERNAME is your GitHub username. - -
-
-
-
- How is OpenDevin different from Devin? -
-

- Devin  - - is a commercial product by Cognition Inc., that served as the initial - inspiration for OpenDevin. They both aim to do a good job at solving software - engineering tasks, but OpenDevin you can download, use, and modify, while Devin - you can only use through the Cognition site. In addition, OpenDevin has evolved - beyond the initial inspiration, and now serves as a community-driven ecosystem for - agent development in general, and we'd love to have you join and - - contribute! -

-
-
-
- How is OpenDevin different from ChatGPT? -
-

- - ChatGPT you can access online, it does not interface with local files, and - its ability to execute code is limited. So it can write code, but it is not - easy to test or execute it. - -

-
-
-
- ); -} diff --git a/docs/src/pages/index.tsx b/docs/src/pages/index.tsx index 7a2b9f0cc032..8f1605b0c6ee 100644 --- a/docs/src/pages/index.tsx +++ b/docs/src/pages/index.tsx @@ -4,12 +4,11 @@ import { HomepageHeader } from "../components/HomepageHeader/HomepageHeader"; import { Welcome } from "../components/Welcome/Welcome"; import { translate } from '@docusaurus/Translate'; -export function Header({ title, summary, description }): JSX.Element { +export function Header({ title, summary }): JSX.Element { return (

{title}

-

{summary}

-

{description}

+

{summary}

); } @@ -17,22 +16,15 @@ export function Header({ title, summary, description }): JSX.Element { export default function Home(): JSX.Element { const { siteConfig } = useDocusaurusContext(); return ( - <> -
- -
- -
-
+
- ); } diff --git a/docs/static/img/system_architecture_overview.png b/docs/static/img/system_architecture_overview.png new file mode 100644 index 000000000000..a9174fc6ba72 Binary files /dev/null and b/docs/static/img/system_architecture_overview.png differ diff --git a/docs/static/img/teaser.mp4 b/docs/static/img/teaser.mp4 index 12ae85f59511..6a3cc0821b05 100644 Binary files a/docs/static/img/teaser.mp4 and b/docs/static/img/teaser.mp4 differ diff --git a/evaluation/EDA/README.md b/evaluation/EDA/README.md index 8ae5f7b843f3..06e453ec7fdd 100644 --- a/evaluation/EDA/README.md +++ b/evaluation/EDA/README.md @@ -2,9 +2,10 @@ This folder contains evaluation harness for evaluating agents on the Entity-deduction-Arena Benchmark, from the paper [Probing the Multi-turn Planning Capabilities of LLMs via 20 Question Games](https://arxiv.org/abs/2310.01468), presented in ACL 2024 main conference. -## Configure OpenDevin and your LLM +## Setup Environment and LLM Configuration + +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. -Create a `config.toml` file if it does not exist at the root of the workspace. Please check [README.md](../../README.md) for how to set this up. 
## Start the evaluation diff --git a/evaluation/EDA/run_infer.py b/evaluation/EDA/run_infer.py index 5cd66901d6ce..29ce4241c66b 100644 --- a/evaluation/EDA/run_infer.py +++ b/evaluation/EDA/run_infer.py @@ -1,30 +1,27 @@ import asyncio -import logging import os import pandas as pd - -# import huggingface_hub from datasets import load_dataset from evaluation.EDA.game import Q20Game, Q20GameCelebrity from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent - -# from evaluation.EDA.scorer import question_scorer from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller game = None @@ -56,39 +53,44 @@ def codeact_user_response_eda(state: State) -> str: } -def process_instance( +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=False, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, -): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) +) -> 
EvalOutput: + config = get_config(metadata) + instance_id = instance['text'].strip() + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation - eval_output_dir = metadata.eval_output_dir if reset_logger: - # Set up logger - log_file = os.path.join( - eval_output_dir, 'logs', f'instance_{instance["text"].strip()}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance["text"].strip()}.\nLOG: tail -f {log_file}' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance_id, log_dir) + else: + logger.info(f'Starting evaluation for instance {instance_id}.') # Prepare instruction - _game_class = {'things': Q20Game, 'celebs': Q20GameCelebrity} + _game_class = {'eda-things': Q20Game, 'eda-celebs': Q20GameCelebrity} guesser_kargs = { 'max_new_tokens': 64, @@ -112,24 +114,16 @@ def process_instance( instruction = f'{game.first_user_utterance}' logger.info(f'Instruction: {instruction}') - - # instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n' - # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] # Here's how you can run the agent (similar to the `main` function) and get the final task state + runtime = await create_runtime(config, 
sid=instance['text'].strip()) - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[ - agent.__class__.__name__ - ], - sid=instance['text'].strip(), - ) + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[metadata.agent_class], ) # ======= Attempt to evaluate the agent's edits ======= # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) @@ -150,21 +144,20 @@ def process_instance( histories = state.history.compatibility_for_eval_history_pairs() # Save the output - output = { - 'instance_id': instance['text'].strip(), - 'instance': instance, - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': { + output = EvalOutput( + instance_id=instance_id, + instance=instance.to_dict(), + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result={ 'success': test_result, 'final_message': final_message, 'ground_truth': instance['text'], }, - } - + ) return output @@ -191,12 +184,16 @@ def process_instance( ) args, _ = parser.parse_known_args() - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') - eda_dataset = load_dataset( 'yizheapple/entity-deduction-arena', name=args.dataset, split=args.data_split ) + eda_dataset.rename(columns={'text': 'instance_id'}, inplace=True) + + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise 
ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, @@ -214,16 +211,15 @@ def process_instance( output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') prepared_dataset = prepare_dataset( - eda_dataset.to_pandas(), output_file, args.eval_n_limit, 'text' + eda_dataset.to_pandas(), output_file, args.eval_n_limit ) - agent = Agent.get_cls(args.agent_cls)(llm=LLM(config.llm)) - - run_evaluation( - prepared_dataset, - metadata, - output_file, - args.eval_num_workers, - process_instance, - 'text', + asyncio.run( + run_evaluation( + prepared_dataset, + metadata, + output_file, + args.eval_num_workers, + process_instance, + ) ) diff --git a/evaluation/EDA/scripts/run_infer.sh b/evaluation/EDA/scripts/run_infer.sh old mode 100644 new mode 100755 diff --git a/evaluation/README.md b/evaluation/README.md index 496b313ffb45..7ede494b8e46 100644 --- a/evaluation/README.md +++ b/evaluation/README.md @@ -12,15 +12,59 @@ all the preprocessing/evaluation/analysis scripts. ## Supported Benchmarks +To learn more about how to integrate your benchmark into OpenDevin, check out [tutorial here](https://docs.all-hands.dev/modules/usage/evaluation_harness). + +### Software Engineering + - SWE-Bench: [`evaluation/swe_bench`](./swe_bench) -- ML-Bench: [`evaluation/ml_bench`](./ml_bench) - HumanEvalFix: [`evaluation/humanevalfix`](./humanevalfix) +- BIRD: [`evaluation/bird`](./bird) +- BioCoder: [`evaluation/ml_bench`](./ml_bench) +- ML-Bench: [`evaluation/ml_bench`](./ml_bench) +- APIBench: [`evaluation/gorilla`](./gorilla/) +- ToolQA: [`evaluation/toolqa`](./toolqa/) + +### Web Browsing + +- WebArena: [`evaluation/webarena`](./webarena/) +- MiniWob++: [`evaluation/miniwob`](./miniwob/) + +### Misc. 
Assistance + - GAIA: [`evaluation/gaia`](./gaia) -- Entity deduction Arena (EDA): [`evaluation/EDA`](./EDA) -- MINT: [`evaluation/mint`](./mint) +- GPQA: [`evaluation/gpqa`](./gpqa) - AgentBench: [`evaluation/agent_bench`](./agent_bench) -- BIRD: [`evaluation/bird`](./bird) -- LogicReasoning: [`evaluation/logic_reasoning`](./logic_reasoning) +- MINT: [`evaluation/mint`](./mint) +- Entity deduction Arena (EDA): [`evaluation/EDA`](./EDA) +- ProofWriter: [`evaluation/logic_reasoning`](./logic_reasoning) + + +## Before everything begins: Setup Environment and LLM Configuration + +Please follow instruction [here](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup your local development environment and LLM. + +OpenDevin in development mode uses `config.toml` to keep track of most configurations. + +Here's an example configuration file you can use to define and use multiple LLMs: + +```toml +[llm] +# IMPORTANT: add your API key here, and set the model to the one you want to evaluate +model = "gpt-4o-2024-05-13" +api_key = "sk-XXX" + +[llm.eval_gpt4_1106_preview_llm] +model = "gpt-4-1106-preview" +api_key = "XXX" +temperature = 0.0 + +[llm.eval_some_openai_compatible_model_llm] +model = "openai/MODEL_NAME" +base_url = "https://OPENAI_COMPATIBLE_URL/v1" +api_key = "XXX" +temperature = 0.0 +``` + ### Result Visualization diff --git a/evaluation/TUTORIAL.md b/evaluation/TUTORIAL.md deleted file mode 100644 index 5906d7ff580c..000000000000 --- a/evaluation/TUTORIAL.md +++ /dev/null @@ -1,186 +0,0 @@ -# Tutorial: How to add a New Evaluation Benchmark to OpenDevin - -This tutorial provides a general guide on how to integrate your own evaluation benchmark into the OpenDevin framework. 
- -You can read this for details, and also learn by example by looking at our existing evaluations: -- [swe_bench](swe_bench/) - - -## A quick walk-through of OpenDevin architecture - -### Before everything begins - -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local develop environment for OpenDevin. - -### Configuration file - -OpenDevin uses `config.toml` to keep track of most configurations. - -Here's an example configuration file you can use: - -```toml -[core] -max_iterations = 100 -cache_dir = "/tmp/cache" - -# IMPORTANT: You should set these two paths to YOUR WORKSPACE directory, -# which will be mounted into Sandbox for agent to interact with! -# The OpenDevin agent will be able to read/write files whatever they like (even rm -rf) -# in this directory, so be careful!! -workspace_base = "/path/to/your/workspace" -workspace_mount_path = "/path/to/your/workspace" -# ========================== - -ssh_hostname = "localhost" - -run_as_devin = false - -[sandbox] -# SWEBench eval specific - but you can tweak it to your needs -use_host_network = false -# linting python after editing helps LLM fix indentations -enable_auto_lint = true - - -box_type = "ssh" -timeout = 120 - -[llm] -# IMPORTANT: add your API key here, and set the model to the one you want to evaluate -model = "gpt-4o-2024-05-13" -api_key = "sk-XXX" -``` - -### How to use OpenDevin programmatically - -In this section, for the purpose of building an evaluation task, we don't use the standard OpenDevin web-based GUI, but rather run OpenDevin backend from CLI. - -For example, you can run the following, which performs the specified task `-t`, with a particular model config `-l` and agent `-c`, for a maximum number of iterations `-i`: - -```bash -poetry run python ./opendevin/core/main.py \ - -i 10 \ - -t "Write me a bash script that print hello world." 
\ - -c CodeActAgent \ - -l llm -``` - -After running the script, you will observe the following: - -![](./static/example_task_1.png) - -You can see the agent uses bash to write a script, makes it executable, and then tests it by running it to make sure it is working. - -At the end of the above screenshot, OpenDevin actually requests user inputs when it think it finishes the task. This will cause issues in evaluation, since most evaluation don't assume additional user input. To fix this, we introduce the functionality of `fake_user_response_fn` in the `main` function, which we describe in the next section. - -## The `main` function - -The signature of `main` (in file [[`opendevin/core/main.py`](../opendevin/core/main.py)]) is as follows: - -```python -async def main( - task_str: str = '', - exit_on_message: bool = False, - fake_user_response_fn: Optional[Callable[[Optional[State]], str]] = None, - sandbox: Optional[Sandbox] = None, -) -> Optional[State]: -``` - -- `task_str`: The task instruction to run. In the above example, it is "Write me a bash script that print hello world." -- `exit_on_message`: whether to quit if the agent asks for a message from user -- `fake_user_response_fn`: An optional function that receives the current state (could be None) and returns a fake user response. -- `sandbox`: An optional sandbox to run the agent in. 
- -### `fake_user_response_fn` - -Here's an example of `fake_user_response_fn` in the implementation for SWE-Bench in [`evaluation/swe_bench/run_infer.py`](swe_bench/run_infer.py): - -```python -def codeact_user_response(state: State) -> str: - msg = ( - 'Please continue working on the task on whatever approach you think is suitable.\n' - 'If you think you have modified the code in a way that fixes the issue, please run the following command: exit .\n' - 'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP OR USE THE INTERNET TO SOLVE THIS TASK.\n' - ) - # check if the agent has tried to talk to the user 3 times, if so, let the agent know it can give up - if state.history: - user_msgs = [ - event - for event in state.history.get_events() - if isinstance(action, MessageAction) and action.source == 'user' - ] - if len(user_msgs) > 2: - # let the agent know that it can give up when it has tried 3 times - return ( - msg - + 'If you want to give up, run: exit .\n' - ) - return msg -``` - - -### Return value - -The main function returns a `State`, which is defined in [`opendevin/controller/state/state.py`](../opendevin/controller/state/state.py). We are mainly using `state.history` here, which is the most important field of data. You can imagine it is being a more structured version of OpenAI's chat completion [messages](https://platform.openai.com/docs/guides/text-generation/chat-completions-api). - -`history: list[tuple[Action, Observation]] = field(default_factory=list)` is a list of (action, observation) tuple. All the actions are defined at [`opendevin/events/action`](../opendevin/events/action) and observations are defined at [`opendevin/events/observation`](../opendevin/events/action). 
- -The agent can emit different actions like `CmdRunAction` (`opendevin/events/action/commands.py`) to execute bash commands and receive `CmdOutputObservation` (`opendevin/events/observation/commands.py`), `IPythonRunCellAction` to receive `IPythonRunCellObservation`, `BrowseInteractiveAction` (`opendevin/events/action/browse.py`) to browse the web and receive `BrowserOutputObservation` (`opendevin/events/observation/browse.py`). - -The action we used in this example is `MessageAction` (`opendevin/events/action/message.py`), which actually denotes a message from either `agent` or `user`. In the [CodeAct agent example](https://github.com/OpenDevin/OpenDevin/blob/7ca560471bd262f22513f3863995d0a8e6121c07/agenthub/codeact_agent/codeact_agent.py#L239-L273), an agent is considered to emit a `MessageAction` when it does not trigger a `CmdRunAction`, `IPythonRunCellAction`, and/or `BrowseInteractiveAction`. - -Typically, the agent returns `MessageAction` when it is confused about the task, and want to ask human for follow-up clarification, which is a good thing in real-world task, but not necessarily in evaluation. So in this example, we provide a dummy prompt to tell the agent "Please continue working on the task on whatever approach you think is suitable[...]". - -If you see something like this, you can consider adding this to your evaluation pipeline as well. - -### `sandbox` - -Sandbox is a fully functioning docker container where the agent can perform all sorts of tasks, e.g., using bash, calling Python, install packages, and more. You can leave `sandbox` to `None` if you don't need to do anything special to pre-configure the `Sandbox`. 
- -In SWE-Bench, we need to copy the proper repository directory to the workspace and activate the right python virtual environment before the agent can start performing the task, so we actually defined a custom [`SWEBenchSSHBox`](https://github.com/OpenDevin/OpenDevin/blob/7ca560471bd262f22513f3863995d0a8e6121c07/evaluation/swe_bench/swe_env_box.py#L12-L118) that inherit from the default sandbox [`SSHBox`](https://github.com/OpenDevin/OpenDevin/blob/7ca560471bd262f22513f3863995d0a8e6121c07/opendevin/runtime/docker/ssh_box.py#L188) and handles all these initial setup. If you need to configure the `sandbox` for your evaluation, check `SWEBenchSSHBox` for a reference of implementation. - -## How to put together an evaluation script? - -Now we know how to start running the agent end-to-end, and how `fake_user_response_fn` and `sandbox` work. We will walk through a piece of dummy code (simplified version of SWE-Bench's [`run_infer.py`](https://github.com/OpenDevin/OpenDevin/blob/main/evaluation/swe_bench/run_infer.py)) that outline the general workflow: - -- Load the dataset and prepare the evaluation configuration. -- Filter out any instances that have already been processed. -- For each instance in the dataset: - - Set up the sandbox environment. - - Run the agent to generate a solution. - - Apply the solution to the instance and execute the test command. - - Collect the results and write them to the output file. -- Perform cleanup after the evaluation is complete. - -You can see the [swe_bench/run_infer.py](swe_bench/run_infer.py) file for an example. - -When you fully understand the `run_infer.py`, you can be ready to actually starting the evaluation! - - -## Run the evaluation! - -You can write your `run_infer.sh` script mimicking SWE-Bench's [`run_infer.sh`](https://github.com/OpenDevin/OpenDevin/blob/main/evaluation/swe_bench/scripts/run_infer.sh). 
- - -You can start the evaluation by running: - -```bash -./run_infer.sh eval_gpt_4o_2024_05_13 -``` -Where `eval_gpt_4o_2024_05_13` is the model config you defined on the config.toml. -Like this: - -```toml -[core] -... - -[llm] -model="gpt-4-32k" -... - -[eval_gpt_4o_2024_05_13] -model="gpt-4o-2024-05-13" -api_key="sk-xxx" -``` - -If `[eval_gpt_4o_2024_05_13]` is not present, it will default to using the model configured in `[llm]`. diff --git a/evaluation/agent_bench/README.md b/evaluation/agent_bench/README.md index 6da710e1c10c..f656e17ba7a8 100644 --- a/evaluation/agent_bench/README.md +++ b/evaluation/agent_bench/README.md @@ -1,44 +1,10 @@ # AgentBench Evaluation -This folder contains evaluation harness for evaluating agents on -the [AgentBench: Evaluating LLMs as Agents](https://arxiv.org/abs/2308.03688). +This folder contains evaluation harness for evaluating agents on the [AgentBench: Evaluating LLMs as Agents](https://arxiv.org/abs/2308.03688). We currently only support running on the `osbench` subset. -## Configure OpenDevin and your LLM +## Setup Environment and LLM Configuration -Create a `config.toml` file if it does not exist at the root of the workspace. Please check [README.md](../../README.md) -for how to set this up. - -Here is an example `config.toml` file: - -```toml -[core] -max_iterations = 100 -cache_dir = "/path/to/cache" - -workspace_base = "/path/to/workspace" -workspace_mount_path = "/path/to/workspace" - -ssh_hostname = "localhost" - -# AgentBench specific -run_as_devin = true - -[sandbox] -use_host_network = false -enable_auto_lint = true -box_type = "ssh" -timeout = 120 - -[llm.eval_gpt35_turbo] -model = "gpt-3.5-turbo" -api_key = "sk-123" -temperature = 0.0 - -[llm.eval_gpt4o] -model = "gpt-4o" -api_key = "sk-123" -temperature = 0.0 -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. 
## Start the evaluation @@ -46,7 +12,18 @@ temperature = 0.0 ./evaluation/agent_bench/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] ``` -Following is the basic command to start the evaluation. Here we are only evaluating the `osbench` for now. +- `model_config`, e.g. `eval_gpt4_1106_preview`, is the config group name for your +LLM settings, as defined in your `config.toml`. +- `git-version`, e.g. `HEAD`, is the git commit hash of the OpenDevin version you would +like to evaluate. It could also be a release tag like `0.6.2`. +- `agent`, e.g. `CodeActAgent`, is the name of the agent for benchmarks, defaulting +to `CodeActAgent`. +- `eval_limit`, e.g. `10`, limits the evaluation to the first `eval_limit` instances. By +default, the script evaluates the entire SWE-bench_Lite test set (300 issues). Note: +in order to use `eval_limit`, you must also set `agent`. + + +Following is the basic command to start the evaluation. You can update the arguments in the script `evaluation/agent_bench/scripts/run_infer.sh`, such as `--max-iterations`, `--eval-num-workers` and so on. @@ -57,5 +34,5 @@ You can update the arguments in the script `evaluation/agent_bench/scripts/run_i - `--eval-n-limit`: the number of examples to evaluate. For example, `100`. 
```bash -./evaluation/agent_bench/scripts/run_infer.sh eval_gpt35_turbo 0.6.2 CodeActAgent 1 +./evaluation/agent_bench/scripts/run_infer.sh eval_gpt35_turbo HEAD CodeActAgent 1 ``` diff --git a/evaluation/agent_bench/helper.py b/evaluation/agent_bench/helper.py index 3a11b5f0b221..ca17e4f97d43 100644 --- a/evaluation/agent_bench/helper.py +++ b/evaluation/agent_bench/helper.py @@ -14,7 +14,7 @@ def try_parse_answer(act) -> str | None: raw_ans = act.thought else: return None - agent_answer = re.findall(r'(.*?)', raw_ans) + agent_answer = re.findall(r'(.*?)', raw_ans, re.DOTALL) if not agent_answer: return None return agent_answer[0].strip() diff --git a/evaluation/agent_bench/run_infer.py b/evaluation/agent_bench/run_infer.py index d6edeb3ff961..77b7a1e0026d 100644 --- a/evaluation/agent_bench/run_infer.py +++ b/evaluation/agent_bench/run_infer.py @@ -1,10 +1,9 @@ import asyncio -import logging import os import re -import shutil +import tempfile +from typing import Any -import docker import pandas as pd from datasets import load_dataset @@ -16,64 +15,175 @@ ) from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, load_app_config, parse_arguments -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.events.action import CmdRunAction, MessageAction -from opendevin.llm.llm import LLM -from opendevin.runtime.docker.ssh_box import DockerSSHBox +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import AgentFinishAction, CmdRunAction, MessageAction 
+from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime + + +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required +): + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. + """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + # Set instance id + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = CmdRunAction(command='cd /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + init_cmd = instance.init + if init_cmd is not None: + script_name = f'{instance.instance_id}_init.sh' + + with tempfile.TemporaryDirectory() as tmpdir: + host_script_path = os.path.join(tmpdir, script_name) + create_sh_file(host_script_path, init_cmd) + await runtime.copy_to( + host_script_path, + '/workspace', + ) + + logger.info(f'Running init script: {script_name}') + action = CmdRunAction(command=f'chmod +x ./{script_name} && ./{script_name}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 -config = load_app_config() + logger.info(f"{'-' * 50} END Runtime 
Initialization Fn {'-' * 50}") + + +async def complete_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required, but it is used to get the workspace_dir_name +) -> dict[str, Any]: + """Complete the runtime for the agent. + + This function is called before the runtime is used to run the agent. + If you need to do something in the sandbox to get the correctness metric after + the agent has run, modify this function. + """ + logger.info(f"{'-' * 50} BEGIN Runtime Completion Fn {'-' * 50}") + obs: CmdOutputObservation + + agent_answer = None + get_agent_result_cmd = instance.get_agent_result + if get_agent_result_cmd is not None: + script_name = 'get_agent_result.sh' + + with tempfile.TemporaryDirectory() as tmpdir: + host_script_path = os.path.join(tmpdir, script_name) + create_sh_file(host_script_path, get_agent_result_cmd) + await runtime.copy_to( + host_script_path, + '/workspace', + ) + logger.info(f'Running get agent result cmd: {script_name}') + + action = CmdRunAction( + command=f'chmod +x ./{script_name} && ./{script_name}', + keep_prompt=False, + ) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + agent_answer = obs.content + # IF the agent answer is not found, retrieve it from the history + # We wait until the controller finishes + + final_ans = None + if instance.ground_truth is not None: + final_ans = instance.ground_truth + else: + get_ground_truth_cmd = instance.get_ground_truth + if get_ground_truth_cmd is not None: + script_name = 'get_ground_truth.sh' + with tempfile.TemporaryDirectory() as tmpdir: + host_script_path = os.path.join(tmpdir, script_name) + create_sh_file(host_script_path, get_ground_truth_cmd) + await runtime.copy_to( + host_script_path, + '/workspace', + ) + logger.info(f'Running get ground truth cmd: {script_name}') + action = CmdRunAction( + command=f'chmod +x ./{script_name} && 
./{script_name}', + keep_prompt=False, + ) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + final_ans = obs.content -def process_instance( + logger.info(f"{'-' * 50} END Runtime Completion Fn {'-' * 50}") + return { + 'final_ans': final_ans, + 'agent_answer': agent_answer, + } + + +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, -): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - - inst_id = instance.instance_id - question = instance.description - # create a directory for the instance's workspace - instance_workspace = str(os.path.join(config.workspace_base, inst_id)) - container_inst_workspace = str( - os.path.join(config.workspace_mount_path_in_sandbox, inst_id) - ) - if os.path.exists(instance_workspace): - shutil.rmtree(instance_workspace) - os.makedirs(instance_workspace, exist_ok=True) +) -> EvalOutput: + config = get_config(metadata) - # Set up the logger properly, so you can run multiprocessing to parallel the evaluation + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, 'logs', f'instance_{inst_id}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {inst_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - 
logger.addHandler(file_handler) + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir) + else: + logger.info(f'Starting evaluation for instance {instance.instance_id}.') # ============================================= # build instruction @@ -86,97 +196,68 @@ def process_instance( 'Please encapsulate your final answer (answer ONLY) within and .\n' 'For example: The answer to the question is 42 .\n' '# Problem \n' - f'{question}\n\n' + f'{instance.description}\n\n' ) instruction += ( 'IMPORTANT: You should ONLY interact with the environment provided ' 'to you AND NEVER ASK FOR HUMAN HELP.\n' ) # NOTE: You can actually set slightly different instruction for different agents - instruction += INST_SUFFIXES[agent.__class__.__name__] + instruction += INST_SUFFIXES[metadata.agent_class] # ============================================= # create sandbox and run the agent # ============================================= - sandbox = DockerSSHBox() - sandbox.execute(f'cd {inst_id}') + runtime: Runtime = await create_runtime(config, sid=instance.instance_id) - init_cmd = instance.init - if init_cmd is not None: - scpt_name = f'{instance.instance_id}_init.sh' - scpt_path = os.path.join(container_inst_workspace, scpt_name) - host_scpt_path = os.path.join(instance_workspace, scpt_name) - create_sh_file(host_scpt_path, init_cmd) - logger.info(f'Running init script: {scpt_path}') - _, init_res = sandbox.execute(scpt_path) - logger.info(f'Init script result: {init_res}') + await initialize_runtime(runtime, instance=instance) # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=FAKE_RESPONSES[agent.__class__.__name__], - sandbox=sandbox, - sid=inst_id, - ) + state: 
State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=FAKE_RESPONSES[metadata.agent_class], ) - if state is None: raise ValueError('State should not be None.') - # get the ground truth - # OSBenchSSHBox.get_ground_truth(instance, state) - # ============================================= # result evaluation # ============================================= - agent_answer = '' - get_agent_result_cmd = instance.get_agent_result - if get_agent_result_cmd is not None: - scpt_name = f'{instance.instance_id}_get_agent_result.sh' - scpt_path = os.path.join(container_inst_workspace, scpt_name) - host_scpt_path = os.path.join(instance_workspace, scpt_name) - create_sh_file(host_scpt_path, get_agent_result_cmd) - logger.info(f'Running get agent result cmd: {scpt_path}') - _, agent_answer = sandbox.execute(scpt_path) - else: + return_val = await complete_runtime(runtime, instance) + agent_answer = return_val['agent_answer'] + final_ans = return_val['final_ans'] + + # If the agent answer is not found, retrieve it from the history + if agent_answer is None: + agent_answer = '' logger.info('Retrieving agent answer from history.') raw_ans = '' # retrieve the last agent message or thought for event in state.history.get_events(reverse=True): - if isinstance(event, MessageAction) and event.source == 'agent': - raw_ans = event.content - elif isinstance(event, CmdRunAction) and event.source == 'agent': - raw_ans = event.thought + if event.source == 'agent': + if isinstance(event, AgentFinishAction): + raw_ans = event.thought + break + elif isinstance(event, MessageAction): + raw_ans = event.content + break + elif isinstance(event, CmdRunAction): + raw_ans = event.thought + break # parse the answer for a solution tag - agent_answer = re.findall(r'(.*?)', raw_ans) + agent_answer = re.findall(r'(.*?)', raw_ans, re.DOTALL) if len(agent_answer) == 0: logger.warning(f'Failed to parse model answer: {raw_ans}') agent_answer = 
raw_ans else: agent_answer = agent_answer[0] - final_ans = '' - if instance.ground_truth is not None: - final_ans = instance.ground_truth - else: - get_ground_truth_cmd = instance.get_ground_truth - if get_ground_truth_cmd is not None: - scpt_name = f'{instance.instance_id}_get_ground_truth.sh' - scpt_path = os.path.join(container_inst_workspace, scpt_name) - host_scpt_path = os.path.join(instance_workspace, scpt_name) - create_sh_file(host_scpt_path, get_ground_truth_cmd) - logger.info(f'Running get ground truth cmd: {scpt_path}') - sandbox.execute(f'cd {container_inst_workspace}') - _, final_ans = sandbox.execute(scpt_path) - comparison_method = instance.comparison_method logger.info( f'Final message: {agent_answer} | Ground truth: {final_ans} | Comparison method: {comparison_method}' @@ -191,58 +272,49 @@ def process_instance( metrics = state.metrics.get() if state.metrics else None # Save the output - output = { - 'instance_id': inst_id, - 'instance': instance.to_dict(), - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': { + output = EvalOutput( + instance_id=instance.instance_id, + instance=instance.to_dict(), + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result={ 'agent_answer': agent_answer, 'final_answer': final_ans, 'check_method': comparison_method, 'result': test_result, }, - } - - # clean up - if os.path.exists(instance_workspace): - shutil.rmtree(instance_workspace) - # Close the sandbox - try: - sandbox.close() - except docker.errors.NotFound as e: - logger.error(f'Failed to close sandbox: {e}') + ) return output if __name__ == '__main__': - id_column = 'instance_id' args = parse_arguments() dataset = load_dataset('iFurySt/AgentBench') agent_bench_tests = dataset['osbench'].to_pandas() - 
llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, - args.dataset_name, + 'AgentBench-OS', args.agent_cls, args.max_iterations, args.eval_note, args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column) - - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(agent_bench_tests, output_file, args.eval_n_limit) + + asyncio.run( + run_evaluation( + instances, metadata, output_file, args.eval_num_workers, process_instance + ) ) diff --git a/evaluation/agent_bench/scripts/run_infer.sh b/evaluation/agent_bench/scripts/run_infer.sh old mode 100644 new mode 100755 diff --git a/evaluation/biocoder/README.md b/evaluation/biocoder/README.md index 6829e1aaa55a..c549afc9c141 100644 --- a/evaluation/biocoder/README.md +++ b/evaluation/biocoder/README.md @@ -2,15 +2,12 @@ Implements evaluation of agents on BioCoder from the BioCoder benchmark introduced in [BioCoder: A Benchmark for Bioinformatics Code Generation with Large Language Models](https://arxiv.org/abs/2308.16458). Please see [here](https://github.com/bigcode-project/bigcode-evaluation-harness/blob/main/bigcode_eval/tasks/humanevalpack.py) for the reference implementation used in the paper. -## Setup Environment +## Setup Environment and LLM Configuration -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local develop environment for OpenDevin. 
- - -## Configure OpenDevin and your LLM -Create a `config.toml` file if it does not exist at the root of the workspace. Please check [README.md](../../README.md) for how to set this up. +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## BioCoder Docker Image + In the opendevin branch of the Biocoder repository, we have slightly modified our original Docker image to work with the OpenDevin environment. In the Docker image are testing scripts (`/testing/start_test_opendevin.py` and aux files in `/testing_files/`) to assist with evaluation. Additionally, we have installed all dependencies, including OpenJDK, mamba (with Python 3.6), and many system libraries. Notably, we have **not** packaged all repositories into the image, so they are downloaded at runtime. **Before first execution, pull our Docker image with the following command** @@ -41,12 +38,12 @@ to `CodeActAgent`. - `eval_limit`, e.g. `10`, limits the evaluation to the first `eval_limit` instances. By default it infers all instances. 
Let's say you'd like to run 1 instance using `eval_gpt4_1106_eval_gpt4o_2024_05_13preview` and CodeActAgent -with OpenDevin version 0.6.2, then your command would be: +with current OpenDevin version, then your command would be: ## Examples ```bash -./evaluation/biocoder/scripts/run_infer.sh eval_gpt4o_2024_05_13 0.6.2 CodeActAgent 1 +./evaluation/biocoder/scripts/run_infer.sh eval_gpt4o_2024_05_13 HEAD CodeActAgent 1 ``` ## Reference diff --git a/evaluation/biocoder/biocoder_env_box.py b/evaluation/biocoder/biocoder_env_box.py deleted file mode 100644 index f589535ca3a4..000000000000 --- a/evaluation/biocoder/biocoder_env_box.py +++ /dev/null @@ -1,387 +0,0 @@ -import json -import os -import re -import sys -from collections import defaultdict -from dataclasses import dataclass - -from datasets import load_dataset - -from opendevin.core.config import load_app_config -from opendevin.core.logger import opendevin_logger as logger -from opendevin.runtime.docker.ssh_box import DockerSSHBox -from opendevin.runtime.plugins import ( - JupyterRequirement, - PluginRequirement, - SWEAgentCommandsRequirement, -) - -config = load_app_config() - -BIOCODER_BENCH_CONTAINER_IMAGE = 'public.ecr.aws/i5g0m1f6/eval_biocoder:v1.0' - - -@dataclass -class BiocoderData: - filePath: str - numLines: int - lineStart: int - lineEnd: int - signature: str - comment: str - content: str - repository: str - promptSummaryOnly: str - contextCode: str - goldenCode: str - test_case_id: str - language: str - - def to_dict(self): - return { - 'filePath': self.filePath, - 'numLines': self.numLines, - 'lineStart': self.lineStart, - 'lineEnd': self.lineEnd, - 'signature': self.signature, - 'comment': self.comment, - 'content': self.content, - 'repository': self.repository, - 'promptSummaryOnly': self.promptSummaryOnly, - 'contextCode': self.contextCode, - 'goldenCode': self.goldenCode, - 'test_case_id': self.test_case_id, - 'language': self.language, - } - - -def get_likely_indent_size(array_of_tabs) -> int: 
- sizes = defaultdict(int) - - for i in range(len(array_of_tabs) - 1): - diff = array_of_tabs[i + 1] - array_of_tabs[i] - if diff > 0: - sizes[diff] += 1 - if len(sizes) == 0: - return 4 - return int(max(sizes, key=sizes.get)) - - -class BiocoderSSHBox(DockerSSHBox): - def __init__( - self, - container_image: str, - timeout: int = 120, - sid: str | None = None, - biocoder_instance_id: str | None = None, - biocoder_instance: BiocoderData | None = None, - skip_workspace_mount: bool = True, - sandbox_plugins: list[PluginRequirement] = [], # noqa: B006 - biocoder_cache_folder: str = 'biocoder_cache', - workspace_dir_name: str | None = None, - ): - if biocoder_instance_id is None: - raise ValueError('biocoder_instance_id must be provided') - self.biocoder_instance_id = biocoder_instance_id - self.biocoder_instance = biocoder_instance - self.skip_workspace_mount = skip_workspace_mount - self.biocoder_cache_folder = biocoder_cache_folder - self.first_line_after_removed = None - self.workspace_dir_name = workspace_dir_name - self.workspace_base = config.workspace_base - self.workspace_mount_path = config.workspace_mount_path - # self.workspace_dir_name_host = os.path.join(config.workspace_base, workspace_dir_name) - - self.context_path = None - self.generated_path = None - self.golden_path = None - - assert ( - container_image is not None - ), 'container_image is required for BiocoderBenchSSHBox!' 
- super().__init__(container_image, timeout, sid) - self.init_plugins(sandbox_plugins) - - @property - def volumes(self): - if self.skip_workspace_mount: - return { - k: v - for k, v in super().volumes.items() - if not v['bind'] == self.sandbox_workspace_dir - } - return super().volumes - - def get_target_filepath(self): - target_filepath = os.path.join( - self.workspace_mount_path, - self.biocoder_instance.repository.split('/')[1], - self.biocoder_instance.filePath, - ) - return target_filepath - - def get_changed_code(self, include_signature=False): - # copies changed code into /testing_files/ - # Note that this does NOT copy the function signature - target_filepath = self.get_target_filepath() - selected_lines = [] - offset = 1 if include_signature else 0 - if self.first_line_after_removed is None: - logger.warning('First line after removed is None') - with open(target_filepath, 'r') as f: - lines = f.read().split('\n') - for i in range(self.biocoder_instance.lineStart - offset, len(lines)): - if lines[i].strip() == self.first_line_after_removed.strip(): - break - selected_lines.append(lines[i]) - text = '\n'.join(selected_lines) - return text - - def copy_changed_code(self): - changed_code = self.get_changed_code(include_signature=True) - with open(self.generated_path, 'w') as f: - f.write(changed_code) - exit_code, output = self.execute_and_check( - f'cp -r /workspace/{self.biocoder_cache_folder}/* /testing_files', - 'Failed to copy the files', - ) - - def remove_code(self): - comment_prefix = {'python': '#', 'java': '//'} - - target_filepath = self.get_target_filepath() - line_start = self.biocoder_instance.lineStart - line_end = self.biocoder_instance.lineEnd - with open(target_filepath, 'r') as f: - lines = f.read().split('\n') - # print("="*10+"ORIGINAL"+"="*10) - # print("\n".join(lines)) - signature_line = lines[line_start - 1] - - # get the number of tabs - def get_indent_size(s: str): - return len(re.match(r'\s*', s).group()) - - indent_sizes = 
list(map(get_indent_size, lines)) - indent_size = get_likely_indent_size(indent_sizes) - comment_indent_size = get_indent_size(signature_line) + indent_size - lines = ( - lines[:line_start] - + [ - f"{' '*comment_indent_size+comment_prefix[self.biocoder_instance.language.lower()]}TODO: replace with your code here" - ] - + ([''] * 2) - + lines[line_end:] - ) - first_line_after_removed_index = line_start - while len( - lines[first_line_after_removed_index].strip() - ) == 0 and first_line_after_removed_index < len(lines): - first_line_after_removed_index += 1 - self.first_line_after_removed = lines[first_line_after_removed_index] - # print("FIRST LINE AFTER REMOVED: ", self.first_line_after_removed) - - with open(target_filepath, 'w') as f: - f.write('\n'.join(lines)) - - # with open(target_filepath, 'r') as f: - # print("="*10+"MODIFIED"+"="*10) - # print(f.read()) - - def execute_and_check(self, cmd: str, error_msg: str) -> tuple[int, str]: - exit_code, output = self.execute(cmd) - if exit_code != 0: - logger.error(error_msg) - sys.exit(1) - return exit_code, output - - @classmethod - def get_box_for_instance( - cls, - instance, - workspace_dir_name=None, - skip_workspace_mount: bool = False, - workspace_mount_path: str | None = None, - sandbox_plugins: list[PluginRequirement] = [], # noqa: B006 - ) -> 'BiocoderSSHBox': - """This method initializes a container image, then runs some initialization commands""" - if workspace_dir_name is None: - workspace_dir_name = f'{instance.repository}__{instance.test_case_id[:10]}__{os.getpid()}'.replace( - '/', '__' - ) - - workspace_base = str(os.path.join(config.workspace_base, workspace_dir_name)) - old_workspace_base = config.workspace_base - old_workspace_mount_path = config.workspace_mount_path - - try: - config.workspace_base = workspace_base - config.workspace_mount_path = workspace_base - - # linting python after editing helps LLM fix indentations - config.sandbox.enable_auto_lint = True - - # create folder for 
transferring files back/forth - biocoder_cache_folder = 'biocoder_cache' - if not os.path.exists(os.path.join(workspace_base, biocoder_cache_folder)): - os.makedirs( - os.path.join(workspace_base, biocoder_cache_folder), exist_ok=True - ) - - file_ext = { - 'python': 'py', - 'java': 'java', - 'c': 'c', - 'cpp': 'cpp', - 'javascript': 'js', - 'typescript': 'ts', - }[instance.language.lower()] - - context_path = os.path.join( - workspace_base, biocoder_cache_folder, 'context.' + file_ext - ) - generated_path = os.path.join( - workspace_base, biocoder_cache_folder, 'generated.' + file_ext - ) - golden_path = os.path.join( - workspace_base, biocoder_cache_folder, 'golden.' + file_ext - ) - - # print(instance.contextCode) - with open(context_path, 'w') as f: - f.write(instance.contextCode) - with open(generated_path, 'w') as f: - f.write(instance.goldenCode) - with open(golden_path, 'w') as f: - f.write(instance.goldenCode) - - testcase_json = { - 'test_case_id': instance.test_case_id, - 'num_cases': 1000, - 'language': instance.language.lower(), - } - - with open( - os.path.join( - workspace_base, biocoder_cache_folder, 'testcase_biocoder.json' - ), - 'w', - ) as f: - f.write(json.dumps(testcase_json, indent=4)) - - # linting python after editing helps LLM fix indentations - config.sandbox.enable_auto_lint = True - - sandbox = cls( - container_image=BIOCODER_BENCH_CONTAINER_IMAGE, - biocoder_instance_id=instance.test_case_id, - biocoder_instance=instance, - skip_workspace_mount=skip_workspace_mount, - sandbox_plugins=sandbox_plugins, - biocoder_cache_folder=biocoder_cache_folder, - workspace_dir_name=workspace_dir_name, - ) - except Exception: - raise - finally: - config.workspace_base = old_workspace_base - config.workspace_mount_path = old_workspace_mount_path - - sandbox.context_path = context_path - sandbox.generated_path = generated_path - sandbox.golden_path = golden_path - - logger.info(f'SSH box started for instance {instance.test_case_id}.') - # cd to the 
workspace - exit_code, output = sandbox.execute_and_check( - 'cd /workspace', 'Failed to cd to workspace' - ) - logger.info(f'cd to workspace: {output}') - - # download repository archive - repository_url = f"https://biocoder.lilbillbiscuit.com/repos/{instance.repository.split('/')[1]}.zip" - exit_code, output = sandbox.execute_and_check( - 'wget -O repo.zip ' + repository_url, 'Failed to download the repository' - ) - logger.info(f'Downloaded the repository: {output}') - exit_code, output = sandbox.execute_and_check( - 'unzip -o -q repo.zip', 'Failed to unzip the repository' - ) - logger.info(f'Unzipped the repository: {output}') - - # copy the context, generated and golden files to the /testing_files folder - exit_code, output = sandbox.execute_and_check( - f'cp -r /workspace/{biocoder_cache_folder}/* /testing_files', - 'Failed to copy the files', - ) - - # chmod 777 - exit_code, output = sandbox.execute_and_check( - 'chmod -R 777 /workspace', - 'Failed to chmod the files', - ) - - return sandbox - - -if __name__ == '__main__': - biocoder_dataset = load_dataset('Lilbillbiscuit/biocoder_public') - EXAMPLE_INSTANCE = biocoder_dataset['test'][0] - EXAMPLE_INSTANCE = BiocoderData(**EXAMPLE_INSTANCE) - - sandbox = BiocoderSSHBox.get_box_for_instance( - instance=EXAMPLE_INSTANCE, - workspace_mount_path='/home/ubuntu/OpenDevinBioCoder/workspace', - skip_workspace_mount=False, - sandbox_plugins=[JupyterRequirement(), SWEAgentCommandsRequirement()], - ) - - # PRE TEST - exit_code, output = sandbox.execute_and_check( - 'cd /testing', - 'Failed to cd /testing', - ) - logger.info(f'cd $REPO_PATH: {output}') - - exit_code, output = sandbox.execute_and_check( - 'whoami', - 'Failed to run whoami', - ) - logger.info(f'whoami: {output}') - - # TEST - exit_code, output = sandbox.execute( - '/home/devin/mambaforge/bin/mamba run -n test python3 /testing/start_test_opendevin.py' - ) - assert exit_code == 0, 'Expected exit code 0 (this should have passed)' - 
logger.info(f'$TEST_CMD:\n{output}') - - exit_code, output = sandbox.execute_and_check( - 'cat /testing_files/results_biocoder.json', 'Failed to read the result file' - ) - - print(output) - json_obj = json.loads(output) - if json_obj['result'] == 'pass': - print('PASS') - else: - print('FAIL') - - sys.stdout.flush() - try: - while True: - try: - user_input = input('>>> ') - except EOFError: - logger.info('Exiting...') - break - if user_input.lower() == 'exit': - logger.info('Exiting...') - break - exit_code, output = sandbox.execute(user_input) - logger.info('exit code: %d', exit_code) - logger.info(output) - sys.stdout.flush() - except KeyboardInterrupt: - logger.info('Exiting...') - sandbox.close() diff --git a/evaluation/biocoder/run_infer.py b/evaluation/biocoder/run_infer.py index 3fb14e8bd76c..3fb213744063 100644 --- a/evaluation/biocoder/run_infer.py +++ b/evaluation/biocoder/run_infer.py @@ -1,33 +1,38 @@ import asyncio +import functools import json -import logging import os -import pathlib -from functools import partial +import tempfile +from typing import Any import pandas as pd from datasets import load_dataset -from evaluation.biocoder.biocoder_env_box import BiocoderData, BiocoderSSHBox +from evaluation.biocoder.utils import BiocoderData from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, codeact_user_response, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, load_app_config, parse_arguments -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM - -config = load_app_config() +from 
opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import CmdRunAction +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { - 'CodeActAgent': partial( + 'CodeActAgent': functools.partial( codeact_user_response, encapsulate_solution=True, try_parse=None ), } @@ -36,111 +41,218 @@ 'CodeActAgent': 'When you think you have fixed the issue through code changes, please run the following command: exit .\n' } +FILE_EXT_MAP = { + 'python': 'py', + 'java': 'java', + 'c': 'c', + 'cpp': 'cpp', + 'javascript': 'js', + 'typescript': 'ts', +} + + +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + BIOCODER_BENCH_CONTAINER_IMAGE = 'public.ecr.aws/i5g0m1f6/eval_biocoder:v1.0' + + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image=BIOCODER_BENCH_CONTAINER_IMAGE, + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime( + runtime: Runtime, + instance: BiocoderData, # this argument is not required +): + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. + """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + file_ext = FILE_EXT_MAP[instance.language.lower()] + + action = CmdRunAction(command='mkdir -p /workspace && mkdir -p /testing_files') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + with tempfile.TemporaryDirectory() as tmpdir: + context_path = os.path.join(tmpdir, 'context.' 
+ file_ext) + with open(context_path, 'w') as f: + f.write(instance.contextCode) + await runtime.copy_to(context_path, '/testing_files') + + golden_path = os.path.join(tmpdir, 'golden.' + file_ext) + with open(golden_path, 'w') as f: + f.write(instance.goldenCode) + await runtime.copy_to(golden_path, '/testing_files') + + testcase_json = { + 'test_case_id': instance.test_case_id, + 'num_cases': 1000, + 'language': instance.language.lower(), + } + testcase_path = os.path.join(tmpdir, 'testcase_biocoder.json') + with open(testcase_path, 'w') as f: + f.write(json.dumps(testcase_json, indent=4)) + + await runtime.copy_to(testcase_path, '/testing_files') + + # setup paths + remove_code_script = os.path.join( + os.path.dirname(__file__), 'scripts', 'setup', 'remove_code.py' + ) + await runtime.copy_to(remove_code_script, '/testing_files') + + action = CmdRunAction(command='cd /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + # download repository archive + repository_url = f"https://biocoder.lilbillbiscuit.com/repos/{instance.repository.split('/')[1]}.zip" + action = CmdRunAction(command='wget -O repo.zip ' + repository_url) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0, f'Failed to download the repository: {obs.content}' + + # unzip the repository + action = CmdRunAction(command='unzip -o -q repo.zip && rm repo.zip') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0, f'Failed to unzip the repository: {obs.content}' + + # chmod 777 + action = CmdRunAction(command='chmod -R 777 /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0, f'Failed to chmod the files: {obs.content}' + + # remove code for evaluation instance + target_filepath = os.path.join( + 
'/workspace', instance.repository.split('/')[1], instance.filePath + ) + line_start = instance.lineStart + line_end = instance.lineEnd + language = instance.language.lower() + action = CmdRunAction( + command=f'python3 /testing_files/remove_code.py --target_filepath {target_filepath} --line_start {line_start} --line_end {line_end} --language {language}' + ) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0, f'Failed to remove the code: {obs.content}' + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + + +async def complete_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required, but it is used to get the workspace_dir_name +) -> dict[str, Any]: + """Complete the runtime for the agent. + + This function is called before the runtime is used to run the agent. + If you need to do something in the sandbox to get the correctness metric after + the agent has run, modify this function. + """ + logger.info(f"{'-' * 50} BEGIN Runtime Completion Fn {'-' * 50}") + obs: CmdOutputObservation -def get_test_result(instance, sandbox, workspace_dir_name): test_result = {'result': {}, 'metadata': {}} - try: - code = sandbox.get_changed_code(include_signature=True) - sandbox.copy_changed_code() + + copy_changed_code_script = os.path.join( + os.path.dirname(__file__), 'scripts', 'setup', 'copy_changed_code.py' + ) + await runtime.copy_to(copy_changed_code_script, '/testing_files') + + file_ext = FILE_EXT_MAP[instance.language.lower()] + target_filepath = os.path.join( + '/workspace', instance.repository.split('/')[1], instance.filePath + ) + generated_path = os.path.join('/testing_files', 'generated.' 
+ file_ext) + + action = CmdRunAction( + command=f'python3 /testing_files/copy_changed_code.py --target_filepath {target_filepath} --generated_code_filepath {generated_path} --line_start {instance.lineStart} --include_signature' + ) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + if obs.exit_code == 0: test_result['metadata']['1_copy_change_success'] = True + + action = CmdRunAction(command=f'cat {generated_path}', keep_prompt=False) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + code = obs.content test_result['metadata']['1_copy_change_code'] = code - except Exception: - logger.error('Error fetching changed code for this instance') + else: test_result['metadata']['1_copy_change_success'] = False test_result['metadata']['1_copy_change_code'] = None - exit_code, output = sandbox.execute_and_check( - 'cd /testing', - 'Failed to cd /testing', - ) - logger.info(f'cd $REPO_PATH: {output}') - - exit_code, output = sandbox.execute_and_check( - 'whoami', - 'Failed to run whoami', - ) - logger.info(f'whoami: {output}') + action = CmdRunAction(command='cd /testing_files') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 - exit_code, output = sandbox.execute( - '/home/devin/mambaforge/bin/mamba run -n test python3 /testing/start_test_opendevin.py' + action = CmdRunAction( + command='/home/devin/mambaforge/bin/mamba run -n test python3 /testing/start_test_opendevin.py' ) - logger.info(f'$TEST_CMD:\n{output}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 - exit_code, output = sandbox.execute_and_check( - 'cat /testing_files/results_biocoder.json', 'Failed to read the result file' + action = CmdRunAction( + command='cat /testing_files/results_biocoder.json', 
keep_prompt=False ) - if exit_code == 0: + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + if obs.exit_code == 0: test_result['metadata']['2_run_test_success'] = True - test_result['metadata']['2_run_test_result'] = str(output) + test_result['metadata']['2_run_test_result'] = str(obs.content) + json_obj = json.loads(obs.content) + test_result['result'] = json_obj['result'] else: test_result['metadata']['2_run_test_success'] = False - test_result['metadata']['2_run_test_result'] = str(output) - json_obj = json.loads(output) - test_result['result'] = json_obj['result'] + test_result['metadata']['2_run_test_result'] = str(obs.content) + logger.info(f"{'-' * 50} END Runtime Completion Fn {'-' * 50}") return test_result -def process_instance( +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, -): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) +) -> EvalOutput: + config = get_config(metadata) instance = BiocoderData(**instance) print(instance) - workspace_dir_name = ( - f'{instance.repository}__{instance.test_case_id[:10]}__{os.getpid()}'.replace( - '/', '__' - ) - ) - workspace_mount_path = os.path.join(config.workspace_base, workspace_dir_name) - # create process-specific workspace dir - # if `not skip_workspace_mount` - we will create a workspace directory for EACH process - # so that different agent don't interfere with each other. 
- workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - - # Setup the logger properly, so you can run multi-processing to parallize the evaluation - if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, 'logs', f'instance_{instance.test_case_id}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance.test_case_id}.\nHint: run "tail -f {log_file}" to see live logs in a seperate shell' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) + instance_id = f'{instance.repository}__{instance.instance_id[:10]}' - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') - - # NOTE: this is something special we do for SWE-Bench due to the reason described in the previous section - # You can omit this if you don't need to setup specialized sandbox - workspace_dir_name = f'{instance.repository}__{instance.test_case_id[:10]}'.replace( - '/', '__' - ) - sandbox = BiocoderSSHBox.get_box_for_instance( - instance, - workspace_dir_name, - skip_workspace_mount=False, - workspace_mount_path=workspace_mount_path, - sandbox_plugins=agent.sandbox_plugins, - ) - - sandbox.remove_code() + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance_id, log_dir) + else: + logger.info(f'Starting evaluation for instance 
{instance_id}.') # Prepare instruction instruction = ( @@ -160,80 +272,76 @@ def process_instance( 'Make sure to include proper formatting in Java and Python, including correct braces and/or indentation.\n' ) # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] # use a session id for concurrent evaluation - sid = instance.test_case_id.replace('/', '__') + sid = instance.instance_id.replace('/', '__') + + runtime = await create_runtime(config, sid=sid) + + await initialize_runtime(runtime, instance) # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[ - agent.__class__.__name__ - ], - sandbox=sandbox, - sid=sid, - ) + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[metadata.agent_class], ) - test_result = get_test_result(instance, sandbox, workspace_dir_name) - if state is None: raise ValueError('State should not be None.') - metrics = state.metrics.get() if state.metrics else None + test_result = await complete_runtime(runtime, instance) + metrics = state.metrics.get() if state.metrics else None # history is now available as a stream of events, rather than list of pairs of (Action, Observation) # for compatibility with the existing output format, we can remake the pairs here # remove when it becomes unnecessary histories = state.history.compatibility_for_eval_history_pairs() + test_result['generated'] = test_result['metadata']['1_copy_change_code'] + # Save the output - output = { - 'test_case_id': 
instance.test_case_id, - 'biocoder_instance': instance.to_dict(), - 'instruction': instruction, - 'generated': test_result['metadata']['1_copy_change_code'], - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': test_result, - } - - # Close the sandbox - sandbox.close() + output = EvalOutput( + instance_id=instance.instance_id, + instance=instance.to_dict(), + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result=test_result, + ) return output if __name__ == '__main__': - id_column = 'test_case_id' args = parse_arguments() + dataset = load_dataset('lilbillbiscuit/biocoder_public') - biocoder_tests = dataset['test'].to_pandas() + biocoder_tests = dataset['train'].to_pandas() + biocoder_tests['instance_id'] = biocoder_tests['test_case_id'] - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, - args.dataset_name, + 'biocoder', args.agent_cls, args.max_iterations, args.eval_note, args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column) - - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(biocoder_tests, output_file, args.eval_n_limit) + + asyncio.run( + run_evaluation( + instances, metadata, output_file, args.eval_num_workers, process_instance + ) ) diff --git a/evaluation/biocoder/scripts/run_infer.sh 
b/evaluation/biocoder/scripts/run_infer.sh old mode 100644 new mode 100755 diff --git a/evaluation/biocoder/scripts/setup/copy_changed_code.py b/evaluation/biocoder/scripts/setup/copy_changed_code.py new file mode 100644 index 000000000000..2cee1e97b66f --- /dev/null +++ b/evaluation/biocoder/scripts/setup/copy_changed_code.py @@ -0,0 +1,45 @@ +import argparse + + +def get_changed_code(target_filepath, line_start, include_signature=False): + # copies changed code into /testing_files/ + # Note that this does NOT copy the function signature + selected_lines = [] + offset = 1 if include_signature else 0 + + with open('/testing_files/first_line_after_removed.txt', 'r') as f: + first_line_after_removed = f.read() + if first_line_after_removed is None: + print('First line after removed is None') + + with open(target_filepath, 'r') as f: + lines = f.read().split('\n') + for i in range(line_start - offset, len(lines)): + if lines[i].strip() == first_line_after_removed.strip(): + break + selected_lines.append(lines[i]) + text = '\n'.join(selected_lines) + return text + + +def copy_changed_code( + target_filepath, generated_code_filepath, line_start, include_signature=False +): + changed_code = get_changed_code(target_filepath, line_start, include_signature) + with open(generated_code_filepath, 'w') as f: + f.write(changed_code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--target_filepath', type=str, required=True) + parser.add_argument('--generated_code_filepath', type=str, required=True) + parser.add_argument('--line_start', type=int, required=True) + parser.add_argument('--include_signature', action='store_true') + args = parser.parse_args() + copy_changed_code( + args.target_filepath, + args.generated_code_filepath, + args.line_start, + args.include_signature, + ) diff --git a/evaluation/biocoder/scripts/setup/remove_code.py b/evaluation/biocoder/scripts/setup/remove_code.py new file mode 100644 index 
000000000000..3c76a41738d5 --- /dev/null +++ b/evaluation/biocoder/scripts/setup/remove_code.py @@ -0,0 +1,74 @@ +import argparse +import os +import re +from collections import defaultdict + + +def get_likely_indent_size(array_of_tabs) -> int: + sizes = defaultdict(int) + + for i in range(len(array_of_tabs) - 1): + diff = array_of_tabs[i + 1] - array_of_tabs[i] + if diff > 0: + sizes[diff] += 1 + if len(sizes) == 0: + return 4 + return int(max(sizes, key=sizes.get)) + + +def get_target_filepath(self): + target_filepath = os.path.join( + self.workspace_mount_path, + self.biocoder_instance.repository.split('/')[1], + self.biocoder_instance.filePath, + ) + return target_filepath + + +def remove_code(target_filepath: str, line_start: int, line_end: int, language: str): + comment_prefix = {'python': '#', 'java': '//'} + + with open(target_filepath, 'r') as f: + lines = f.read().split('\n') + # print("="*10+"ORIGINAL"+"="*10) + # print("\n".join(lines)) + signature_line = lines[line_start - 1] + + # get the number of tabs + def get_indent_size(s: str): + return len(re.match(r'\s*', s).group()) + + indent_sizes = list(map(get_indent_size, lines)) + indent_size = get_likely_indent_size(indent_sizes) + comment_indent_size = get_indent_size(signature_line) + indent_size + lines = ( + lines[:line_start] + + [ + f"{' '*comment_indent_size+comment_prefix[language.lower()]}TODO: replace with your code here" + ] + + ([''] * 2) + + lines[line_end:] + ) + first_line_after_removed_index = line_start + while len( + lines[first_line_after_removed_index].strip() + ) == 0 and first_line_after_removed_index < len(lines): + first_line_after_removed_index += 1 + + first_line_after_removed = lines[first_line_after_removed_index] + print('FIRST LINE AFTER REMOVED: ', first_line_after_removed) + with open('/testing_files/first_line_after_removed.txt', 'w') as f: + f.write(first_line_after_removed) + + with open(target_filepath, 'w') as f: + f.write('\n'.join(lines)) + + +if __name__ == 
'__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--target_filepath', type=str, required=True) + parser.add_argument('--line_start', type=int, required=True) + parser.add_argument('--line_end', type=int, required=True) + parser.add_argument('--language', type=str, required=True) + args = parser.parse_args() + remove_code(args.target_filepath, args.line_start, args.line_end, args.language) diff --git a/evaluation/biocoder/utils.py b/evaluation/biocoder/utils.py new file mode 100644 index 000000000000..2376d6104165 --- /dev/null +++ b/evaluation/biocoder/utils.py @@ -0,0 +1,36 @@ +from dataclasses import dataclass + + +@dataclass +class BiocoderData: + instance_id: str + filePath: str + numLines: int + lineStart: int + lineEnd: int + signature: str + comment: str + content: str + repository: str + promptSummaryOnly: str + contextCode: str + goldenCode: str + test_case_id: str + language: str + + def to_dict(self): + return { + 'filePath': self.filePath, + 'numLines': self.numLines, + 'lineStart': self.lineStart, + 'lineEnd': self.lineEnd, + 'signature': self.signature, + 'comment': self.comment, + 'content': self.content, + 'repository': self.repository, + 'promptSummaryOnly': self.promptSummaryOnly, + 'contextCode': self.contextCode, + 'goldenCode': self.goldenCode, + 'test_case_id': self.test_case_id, + 'language': self.language, + } diff --git a/evaluation/bird/README.md b/evaluation/bird/README.md index 05e0fd8021f1..072da010f858 100644 --- a/evaluation/bird/README.md +++ b/evaluation/bird/README.md @@ -2,43 +2,14 @@ Implements evaluation of agents on BIRD introduced in [Can LLM Already Serve as A Database Interface? A BIg Bench for Large-Scale Database Grounded Text-to-SQLs](https://arxiv.org/abs/2305.03111). Please see [here](https://bird-bench.github.io/) for the reference implementation used in the paper. 
-## Setup Environment +## Setup Environment and LLM Configuration -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local develop environment for OpenDevin. - - -## Configure OpenDevin and your LLM - -Create a `config.toml` file if it does not exist at the root of the workspace. - -Add the following configurations: - -```toml -[core] -max_iterations = 100 -cache_dir = "/tmp/cache" -ssh_hostname = "localhost" - -[sandbox] -enable_auto_lint = true - -# TODO: Change these to the model you want to evaluate -[llm.eval_gpt4_1106_preview] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[llm.eval_some_openai_compatible_model] -model = "openai/MODEL_NAME" -base_url = "https://OPENAI_COMPATIBLE_URL/v1" -api_key = "XXX" -temperature = 0.0 -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Run Inference on Bird ```bash -./evaluation/bird/scripts/run_infer.sh eval_gpt4_1106_preview [model_config] [git-version] +./evaluation/bird/scripts/run_infer.sh [model_config] [git-version] ``` - `model_config`, e.g. 
`eval_gpt4_1106_preview`, is the config group name for your diff --git a/evaluation/bird/run_infer.py b/evaluation/bird/run_infer.py index d370f6780ee1..86db5eb9eb20 100644 --- a/evaluation/bird/run_infer.py +++ b/evaluation/bird/run_infer.py @@ -1,12 +1,12 @@ import asyncio import json -import logging import os import pathlib import re -import shutil import sqlite3 import subprocess +import zipfile +from typing import Any import pandas as pd from datasets import load_dataset @@ -15,20 +15,24 @@ from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, load_app_config, parse_arguments -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.events.action import MessageAction -from opendevin.llm.llm import LLM - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import CmdRunAction, MessageAction +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime def codeact_user_response(state: State) -> str: @@ -62,6 +66,27 @@ def codeact_user_response(state: State) -> str: } +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + 
workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + def execute_sql(db_path, gen_sql, gold_sql): """Execute the generated SQL and the ground truth SQL and compare the results.""" with sqlite3.connect(db_path) as conn: @@ -76,12 +101,213 @@ def execute_sql(db_path, gen_sql, gold_sql): return res -def get_test_result(instance, path, timeout=30): +LOCAL_DATASET_PATH = os.path.join(os.path.dirname(__file__), 'data') + + +def load_bird(): + """Main function to handle the flow of downloading, processing, and loading the bird dataset.""" + + def _download_bird(): + """Downloads and extracts the bird dataset from a specified URL into a local directory.""" + devset_path = os.path.join(LOCAL_DATASET_PATH, 'dev') + if not os.path.exists(devset_path): + logger.info( + f'{LOCAL_DATASET_PATH} folder does not exist, starting download and extraction...' + ) + os.makedirs(LOCAL_DATASET_PATH, exist_ok=True) + + download_url = 'https://bird-bench.oss-cn-beijing.aliyuncs.com/dev.zip' + download_path = os.path.join(LOCAL_DATASET_PATH, 'dev.zip') + if not os.path.exists(download_path): + logger.info('Start Downloading...') + subprocess.run(['wget', download_url, '-O', download_path]) + logger.info('Download completed.') + + devset_path = os.path.join(LOCAL_DATASET_PATH, 'dev') + if not os.path.exists(devset_path): + logger.info('Start Extracting...') + os.makedirs(devset_path, exist_ok=True) + with zipfile.ZipFile(download_path, 'r') as zip_ref: + zip_ref.extractall(devset_path) + # move everything in 'dev_20240627' to the root folder + for file in os.listdir(os.path.join(devset_path, 'dev_20240627')): + os.rename( + os.path.join(devset_path, 'dev_20240627', file), + os.path.join(devset_path, file), + ) + os.rmdir(os.path.join(devset_path, 'dev_20240627')) + logger.info('Extraction completed.') + + # extract databases + database_path = os.path.join(devset_path, 'dev_databases.zip') + assert os.path.exists(database_path) + logger.info('Start 
Extracting...') + with zipfile.ZipFile(database_path, 'r') as zip_ref: + zip_ref.extractall(devset_path) + logger.info('Extraction completed.') + else: + logger.info(f'{LOCAL_DATASET_PATH} folder already exists.') + return devset_path + + def _extract_create_table_prompt(db_path, limit_value=0): + """Generates a SQL prompt with CREATE TABLE statements and sample data from the database.""" + table_query = "SELECT * FROM sqlite_master WHERE type='table';" + tables = sqlite3.connect(db_path).cursor().execute(table_query).fetchall() + prompt = '' + for table in tables: + table_name = table[1] + create_table_statement = table[-1] + + table_info_query = f'PRAGMA table_info(`{table_name}`);' + top_k_row_query = f'SELECT * FROM {table_name} LIMIT {limit_value};' + try: + headers = [ + x[1] + for x in sqlite3.connect(db_path) + .cursor() + .execute(table_info_query) + .fetchall() + ] + except Exception: + logger.error(f'Error Connection: {table_info_query}, {top_k_row_query}') + exit(0) + + prompt += create_table_statement + ';\n' + if limit_value > 0: + top_k_rows = ( + sqlite3.connect(db_path) + .cursor() + .execute(top_k_row_query) + .fetchall() + ) + prompt += ( + f"/*\n3 example rows:\n{top_k_row_query}\n{' '.join(headers)}\n" + ) + for row in top_k_rows: + row = [str(x) for x in row] + row = [x if x is not None else '' for x in row] + prompt += ' '.join(row) + '\n' + prompt += '*/\n' + prompt += '\n' + return prompt + + def _create_prompt(e, database_path): + """Create a prompt for the given example""" + db_id = e['db_id'] + db_path = pathlib.Path(database_path) / db_id / f'{db_id}.sqlite' + + # Extract the CREATE TABLE statements and sample data from the database + prompt = _extract_create_table_prompt(db_path) + prompt += f"-- External Knowledge: {e['evidence']}\n\n" + prompt += '-- Using valid SQLite and understanding External Knowledge, answer the following questions for the tables provided above.\n\n' + prompt += '-- Using valid SQLite, answer the following 
questions for the tables provided above.\n' + prompt += f"Question: {e['question']}\n" + + return prompt + + def _process_bird(dataset_path): + """Processes the raw bird dataset into a structured format and saves it as JSON.""" + processed_path = os.path.join(LOCAL_DATASET_PATH, 'dev', 'processed_dev.json') + if not os.path.exists(processed_path): + logger.info( + f'{processed_path} folder does not exist, starting processing...' + ) + raw_data_path = os.path.join(LOCAL_DATASET_PATH, 'dev', 'dev.json') + database_path = os.path.join(LOCAL_DATASET_PATH, 'dev', 'dev_databases') + processed_data = [] + with pathlib.Path(raw_data_path).open('r') as f: + data = json.load(f) + for e in tqdm(data): + item = { + 'instance_id': f'{len(processed_data)}', + 'db_path': os.path.join( + database_path, e['db_id'], f"{e['db_id']}.sqlite" + ), + 'db_id': e['db_id'], + 'instruction': _create_prompt(e, database_path), + 'SQL': e['SQL'], + } + processed_data.append(item) + + with pathlib.Path(processed_path).open('w') as f: + json.dump(processed_data, f, indent=2) + logger.info(f'Processed data saved to {processed_path}') + else: + logger.info(f'{processed_path} folder already exists.') + bird_dataset = load_dataset('json', data_files={'test': processed_path}) + return bird_dataset + + raw_dataset_path = _download_bird() + bird_dataset = _process_bird(raw_dataset_path) + return bird_dataset + + +async def initialize_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required +): + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + # Copy the database to the workspace + db_file = os.path.join( + LOCAL_DATASET_PATH, + 'dev', + 'dev_databases', + instance.db_id, + f'{instance.db_id}.sqlite', + ) + await runtime.copy_to(db_file, '/workspace') + + # Check the database is copied + action = CmdRunAction( + command='cd /workspace && ls -l', + keep_prompt=False, + ) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + assert f'{instance.db_id}.sqlite' in obs.content + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + + +async def complete_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required, but it is used to get the workspace_dir_name +) -> dict[str, Any]: + """Complete the runtime for the agent. + + This function is called before the runtime is used to run the agent. + If you need to do something in the sandbox to get the correctness metric after + the agent has run, modify this function. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Completion Fn {'-' * 50}") + obs: CmdOutputObservation + timeout = 30 + test_result = {'result': {}, 'metadata': {}} # Read the generated python file - with open(path, 'r') as f: - gen_file = f.read() + instance_id = instance.instance_id.replace('/', '__') + path = os.path.join('/workspace', f'{instance_id}.py') + + action = CmdRunAction( + command=f'cat {path}', + keep_prompt=False, + ) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + + if obs.exit_code != 0: + test_result['result'] = {'passed': 0, 'status': 'error'} + return test_result + + gen_file = obs.content.strip().replace('\r\n', '\n') # Extract the SQL from the python file gen_sql = '' @@ -96,7 +322,13 @@ def get_test_result(instance, path, timeout=30): # Execute the SQL try: res = func_timeout( - timeout, execute_sql, args=(instance.db_path, gen_sql, gold_sql) + timeout, + execute_sql, + args=( + instance.db_path, + gen_sql, + gold_sql, + ), ) status = 'success' except FunctionTimedOut: @@ -114,68 +346,28 @@ def get_test_result(instance, path, timeout=30): 'gen_sql': gen_sql, 'gold_sql': gold_sql, } + logger.info(f"{'-' * 50} END Runtime Completion Fn {'-' * 50}") return test_result -def process_instance( +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, -): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - workspace_mount_path = os.path.join( - config.workspace_mount_path, 'bird_eval_workspace' - ) - workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - - # reset workspace to config - config.workspace_mount_path = workspace_mount_path - - # Copy the database to the workspace - db_root = os.path.join( - config.workspace_base, 'evaluation_bird/dev/dev_databases', instance.db_id - ) - target_path = 
os.path.join(workspace_mount_path, f'{instance.db_id}') - if not os.path.exists(target_path): - logger.info(f'Copying database from {db_root} to {target_path}...') - shutil.copytree(db_root, target_path) - - # Set up the database path - database_path = os.path.join(instance.db_id, f'{instance.db_id}.sqlite') - +) -> EvalOutput: + config = get_config(metadata) # use session id for concurrent evaluation - sid = instance.task_id.replace('/', '__') + instance_id = instance.instance_id.replace('/', '__') # Set up the logger properly, so you can run multi-processing to parallelize the evaluation if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, - 'logs', - f'instance_{sid}.log', - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance.task_id}.\nLOG: tail -f {log_file}' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance_id, log_dir) + else: + logger.info(f'Starting evaluation for instance {instance_id}.') # Create file with BIRD instance + database_path = os.path.join('/workspace', f'{instance.db_id}.sqlite') statements = f""" import sqlite3 def execute_sql(db_path, sql): @@ -192,12 +384,12 @@ def execute_sql(db_path, sql): result = execute_sql(db_path, sql) print(result) """ - path = os.path.join(config.workspace_mount_path, f'{sid}.py') + instruction = ( f'You are a SQL expert and 
need to complete the following text-to-SQL tasks.' f'\n\n{instance.instruction}\n\n' 'Please write the SQL in one line without line breaks.' - f'And write a new python file named {sid}.py to call the SQL you wrote.' + f'And write a new python file named {instance_id}.py to call the SQL you wrote.' 'You need to follow the code template below:' f'\n\n{statements}\n\n' 'Environment has been set up for you to start working.' @@ -208,23 +400,21 @@ def execute_sql(db_path, sql): 'You SHOULD INCLUDE PROPER INDENTATION in your edit commands.\n' ) # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] + + runtime = await create_runtime(config, sid=instance_id) + await initialize_runtime(runtime, instance) + # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[ - agent.__class__.__name__ - ], - sid=sid, - ) + state: State | None = await run_controller( + config=config, + task_str=instruction, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[metadata.agent_class], + runtime=runtime, ) # ======= Attempt to evaluate the agent's edits ======= - test_result = get_test_result(instance, path) + test_result = await complete_runtime(runtime, instance) # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. 
@@ -238,162 +428,43 @@ def execute_sql(db_path, sql): histories = state.history.compatibility_for_eval_history_pairs() # Save the output - output = { - 'task_id': instance.task_id, - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': test_result, - } + output = EvalOutput( + instance_id=instance.instance_id, + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result=test_result, + ) return output -def load_bird(): - """Main function to handle the flow of downloading, processing, and loading the bird dataset.""" - raw_dataset_path = download_bird() - bird_dataset = process_bird(raw_dataset_path) - return bird_dataset - - -def download_bird(): - """Downloads and extracts the bird dataset from a specified URL into a local directory.""" - dataset_path = os.path.join(config.workspace_base, 'evaluation_bird') - devset_path = os.path.join(dataset_path, 'dev') - if not os.path.exists(dataset_path): - logger.info( - f'{dataset_path} folder does not exist, starting download and extraction...' 
- ) - os.makedirs(dataset_path, exist_ok=True) - download_url = 'https://bird-bench.oss-cn-beijing.aliyuncs.com/dev.zip' - download_path = os.path.join(dataset_path, 'dev.zip') - logger.info('Start Downloading...') - subprocess.run(['wget', download_url, '-O', download_path]) - logger.info('Download completed.') - logger.info('Start Extracting...') - subprocess.run(['unzip', download_path, '-d', dataset_path]) - # extract databases - devset_path = os.path.join(dataset_path, 'dev') - database_path = os.path.join(devset_path, 'dev_databases.zip') - subprocess.run(['unzip', database_path, '-d', devset_path]) - logger.info('Extraction completed.') - else: - logger.info(f'{dataset_path} folder already exists.') - return devset_path - - -def process_bird(dataset_path): - """Processes the raw bird dataset into a structured format and saves it as JSON.""" - processed_path = os.path.join(dataset_path, 'processed_dev.json') - if not os.path.exists(processed_path): - logger.info(f'{processed_path} folder does not exist, starting processing...') - raw_data_path = os.path.join(dataset_path, 'dev.json') - database_path = os.path.join(dataset_path, 'dev_databases') - processed_data = [] - with pathlib.Path(raw_data_path).open('r') as f: - data = json.load(f) - for e in tqdm(data): - item = { - 'task_id': f'{len(processed_data)}', - 'db_path': os.path.join( - database_path, e['db_id'], f"{e['db_id']}.sqlite" - ), - 'db_id': e['db_id'], - 'instruction': create_prompt(e, database_path), - 'SQL': e['SQL'], - } - processed_data.append(item) - - with pathlib.Path(processed_path).open('w') as f: - json.dump(processed_data, f, indent=2) - logger.info(f'Processed data saved to {processed_path}') - else: - logger.info(f'{processed_path} folder already exists.') - bird_dataset = load_dataset('json', data_files={'test': processed_path}) - return bird_dataset - - -def extract_create_table_prompt(db_path, limit_value=0): - """Generates a SQL prompt with CREATE TABLE statements and sample data 
from the database.""" - table_query = "SELECT * FROM sqlite_master WHERE type='table';" - tables = sqlite3.connect(db_path).cursor().execute(table_query).fetchall() - prompt = '' - for table in tables: - table_name = table[1] - create_table_statement = table[-1] - - table_info_query = f'PRAGMA table_info(`{table_name}`);' - top_k_row_query = f'SELECT * FROM {table_name} LIMIT {limit_value};' - try: - headers = [ - x[1] - for x in sqlite3.connect(db_path) - .cursor() - .execute(table_info_query) - .fetchall() - ] - except Exception: - logger.error(f'Error Connection: {table_info_query}, {top_k_row_query}') - exit(0) - - prompt += create_table_statement + ';\n' - if limit_value > 0: - top_k_rows = ( - sqlite3.connect(db_path).cursor().execute(top_k_row_query).fetchall() - ) - prompt += ( - f"/*\n3 example rows:\n{top_k_row_query}\n{' '.join(headers)}\n" - ) - for row in top_k_rows: - row = [str(x) for x in row] - row = [x if x is not None else '' for x in row] - prompt += ' '.join(row) + '\n' - prompt += '*/\n' - prompt += '\n' - return prompt - - -def create_prompt(e, database_path): - """Create a prompt for the given example""" - db_id = e['db_id'] - db_path = pathlib.Path(database_path) / db_id / f'{db_id}.sqlite' - - # Extract the CREATE TABLE statements and sample data from the database - prompt = extract_create_table_prompt(db_path) - prompt += f"-- External Knowledge: {e['evidence']}\n\n" - prompt += '-- Using valid SQLite and understanding External Knowledge, answer the following questions for the tables provided above.\n\n' - prompt += '-- Using valid SQLite, answer the following questions for the tables provided above.\n' - prompt += f"Question: {e['question']}\n" - - return prompt - - if __name__ == '__main__': - id_column = 'task_id' args = parse_arguments() bird_dataset = load_bird() dataset = bird_dataset['test'].to_pandas() + dataset.rename(columns={'task_id': 'instance_id'}, inplace=True) - llm_config = get_llm_config_arg(args.llm_config) if 
args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, - args.dataset_name, + 'BIRD', args.agent_cls, args.max_iterations, args.eval_note, args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column) - - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(dataset, output_file, args.eval_n_limit) + + asyncio.run( + run_evaluation( + instances, metadata, output_file, args.eval_num_workers, process_instance + ) ) diff --git a/evaluation/bird/scripts/run_infer.sh b/evaluation/bird/scripts/run_infer.sh old mode 100644 new mode 100755 diff --git a/evaluation/browsing_delegation/README.md b/evaluation/browsing_delegation/README.md index 495ee0bd6bb0..fe71dacb88cb 100644 --- a/evaluation/browsing_delegation/README.md +++ b/evaluation/browsing_delegation/README.md @@ -5,30 +5,9 @@ Some of OpenDevin's agent supports agent delegation action, for example, CodeAct This evaluation tests whether CodeActAgent can correctly delegate the instruction from WebArena and MiniWob benchmark to the BrowsingAgent. If so, the browsing performance upper-bound of CodeActAgent will be the performance of BrowsingAgent. +## Setup Environment and LLM Configuration -## Setup Environment - -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to set up a local development environment for OpenDevin. - -## Configure OpenDevin and your LLM - -Create a `config.toml` file if it does not exist at the root of the workspace. 
- -Add the following configurations: - -```toml -# TODO: Change these to the model you want to evaluate -[llm.eval_gpt4_1106_preview_llm] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[llm.eval_some_openai_compatible_model_llm] -model = "openai/MODEL_NAME" -base_url = "https://OPENAI_COMPATIBLE_URL/v1" -api_key = "XXX" -temperature = 0.0 -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Run Inference diff --git a/evaluation/browsing_delegation/run_infer.py b/evaluation/browsing_delegation/run_infer.py index 02e529fe0d4f..c4d5ecd4d6ea 100644 --- a/evaluation/browsing_delegation/run_infer.py +++ b/evaluation/browsing_delegation/run_infer.py @@ -1,5 +1,4 @@ import asyncio -import logging import os import re @@ -9,56 +8,61 @@ from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, load_app_config, parse_arguments -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller # Only CodeActAgent can delegate to BrowsingAgent SUPPORTED_AGENT_CLS = {'CodeActAgent'} -def process_instance( +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + assert ( + metadata.max_iterations == 1 + ), 'max_iterations must be 1 for browsing delegation evaluation.' 
+ config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=False, + use_host_network=False, + ), + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, -): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - env_id = instance.instance_id +) -> EvalOutput: + config = get_config(metadata) # Setup the logger properly, so you can run multi-processing to parallelize the evaluation if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, 'logs', f'instance_{env_id}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {env_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir) else: - logger.info(f'Starting evaluation for instance {env_id}.') + logger.info(f'Starting evaluation for instance {instance.instance_id}.') instruction = ( f'You can delegate browsing tasks to a browser agent. ' @@ -67,20 +71,13 @@ def process_instance( f'NOTE: You should copy the "query" as is into the tag. 
DO NOT change ANYTHING in the query.' ) - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - sid=env_id, - ) - ) - - # ======= Attempt to evaluate the agent's environment impact ======= + runtime = await create_runtime(config, sid=instance.instance_id) - # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) - # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + ) if state is None: raise ValueError('State should not be None.') @@ -115,20 +112,19 @@ def process_instance( result['is_exact_match'] = is_exact_match # Save the output - output = { - 'instance_id': env_id, - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': { + output = EvalOutput( + instance_id=instance.instance_id, + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result={ 'query': instance.instruction, 'action': last_delegate_action, 'result': result, }, - } - + ) return output @@ -138,9 +134,13 @@ def process_instance( dataset = load_dataset('OpenDevin/eval-browsing-instructions') dataset = dataset['train'].to_pandas() assert dataset.columns.tolist() == ['instance_id', 'instruction'] - id_column = 'instance_id' - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + + if llm_config is None: + raise ValueError(f'Could not find LLM 
config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, @@ -150,18 +150,20 @@ def process_instance( args.eval_note, args.eval_output_dir, ) + if metadata.agent_class not in SUPPORTED_AGENT_CLS: raise ValueError( f'Agent class {metadata.agent_class} not supported with AgentDelegation.' ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column) - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(dataset, output_file, args.eval_n_limit) + asyncio.run( + run_evaluation( + instances, + metadata, + output_file, + args.eval_num_workers, + process_instance, + ) ) diff --git a/evaluation/gaia/README.md b/evaluation/gaia/README.md index 6cf911c95454..cd7e7c967709 100644 --- a/evaluation/gaia/README.md +++ b/evaluation/gaia/README.md @@ -2,9 +2,9 @@ This folder contains evaluation harness for evaluating agents on the [GAIA benchmark](https://arxiv.org/abs/2311.12983). -## Configure OpenDevin and your LLM +## Setup Environment and LLM Configuration -Create a `config.toml` file if it does not exist at the root of the workspace. Please check [README.md](../../README.md) for how to set this up. +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Run the evaluation We are using the GAIA dataset hosted on [Hugging Face](https://huggingface.co/datasets/gaia-benchmark/GAIA). 
diff --git a/evaluation/gaia/run_infer.py b/evaluation/gaia/run_infer.py index cfbfb32eb9be..0ff4b1a4340e 100644 --- a/evaluation/gaia/run_infer.py +++ b/evaluation/gaia/run_infer.py @@ -1,10 +1,7 @@ import asyncio -import logging +import functools import os -import pathlib import re -import shutil -from functools import partial import huggingface_hub import pandas as pd @@ -13,28 +10,31 @@ from evaluation.gaia.scorer import question_scorer from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, codeact_user_response, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.events.action import CmdRunAction, MessageAction -from opendevin.llm.llm import LLM - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import AgentFinishAction, CmdRunAction, MessageAction +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime -DATASET_CACHE_DIR = '~/.cache/open-devin/evals/gaia' -DATASET_CACHE_DIR = os.path.expanduser(DATASET_CACHE_DIR) +DATASET_CACHE_DIR = os.path.join(os.path.dirname(__file__), 'data') AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { - 'CodeActAgent': partial(codeact_user_response, encapsulate_solution=True), + 'CodeActAgent': functools.partial(codeact_user_response, encapsulate_solution=True), } AGENT_CLS_TO_INST_SUFFIX = { @@ -42,151 +42,174 @@ } -def process_instance( - instance: pd.Series, +def get_config( metadata: EvalMetadata, - 
reset_logger: bool = True, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required ): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - # create process-specific workspace dir - # we will create a workspace directory for EACH process - # so that different agent don't interfere with each other. - old_workspace_mount_path = config.workspace_mount_path - - try: - workspace_mount_path = os.path.join( - config.workspace_mount_path, '_eval_workspace' - ) - workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - config.workspace_mount_path = workspace_mount_path - - # Setup the logger properly, so you can run multi-processing to parallelize the evaluation - eval_output_dir = metadata.eval_output_dir - if reset_logger: - # Set up logger - log_file = os.path.join( - eval_output_dir, 'logs', f'instance_{instance["task_id"]}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance["task_id"]}.\nLOG: tail -f {log_file}' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - 
logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') - if instance['file_name'] != '': - # if this question comes with a file, we need to save it to the workspace - assert metadata.data_split is not None - src_file = os.path.join( - DATASET_CACHE_DIR, '2023', metadata.data_split, instance['file_name'] - ) - extension_name = instance['file_name'].split('.')[-1] - dest_file = os.path.join(workspace_mount_path, f'file.{extension_name}') - shutil.copyfile(src_file, dest_file) - logger.info(f'File copied to {dest_file}') - else: - dest_file = None - - # Prepare instruction - instruction = f"{instance['Question']}\n" - logger.info(f'Instruction: {instruction}') - if dest_file: - instruction += f"\n\nThe mentioned file is provided in the workspace at: {dest_file.split('/')[-1]}" - - instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n' - instruction += 'Please encapsulate your final answer (answer ONLY) within and .\n' - instruction += ( - 'For example: The answer to the question is 42 .\n' + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + if instance['file_name'] != '': + # if this question comes with a file, we need to save it to the workspace + assert metadata.data_split is not None + src_file = os.path.join( + DATASET_CACHE_DIR, '2023', metadata.data_split, instance['file_name'] ) - # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX.get(agent.__class__.__name__, '') - logger.info(f'Instruction:\n{instruction}', extra={'msg_type': 'OBSERVATION'}) - - # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[ - agent.__class__.__name__ - ], - sid=instance['task_id'], - ) + assert os.path.exists(src_file) + dest_file = os.path.join('/workspace', instance['file_name']) + await runtime.copy_to(src_file, dest_file) + + # rename to file.extension_name + extension_name = instance['file_name'].split('.')[-1] + action = CmdRunAction( + command=f'mv /workspace/{instance["file_name"]} /workspace/file.{extension_name}' ) - # ======= Attempt to evaluate the agent's edits ======= - # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) - # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. 
+ logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = CmdRunAction(command='cd /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + + +async def process_instance( + instance: pd.Series, + metadata: EvalMetadata, + reset_logger: bool = True, +) -> EvalOutput: + config = get_config(metadata) + + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance['instance_id'], log_dir) + else: + logger.info(f'Starting evaluation for instance {instance["instance_id"]}.') + + if instance['file_name'] != '': + extension_name = instance['file_name'].split('.')[-1] + dest_file = os.path.join('/workspace', f'file.{extension_name}') + else: + dest_file = None - if state is None: - raise ValueError('State should not be None.') + # Prepare instruction + instruction = f"{instance['Question']}\n" + logger.info(f'Instruction: {instruction}') + if dest_file: + instruction += f"\n\nThe mentioned file is provided in the workspace at: {dest_file.split('/')[-1]}" - model_answer_raw = '' + instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n' + instruction += 'Please encapsulate your final answer (answer ONLY) within and .\n' + instruction += ( + 'For example: The answer to the question is 42 .\n' + ) + # NOTE: You can actually set slightly different instruction for different agents + instruction += AGENT_CLS_TO_INST_SUFFIX.get(metadata.agent_class, '') + logger.info(f'Instruction:\n{instruction}', extra={'msg_type': 'OBSERVATION'}) + + runtime = await create_runtime(config, sid=instance['instance_id']) + await 
initialize_runtime(runtime, instance) + + # Here's how you can run the agent (similar to the `main` function) and get the final task state + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[metadata.agent_class], + ) + # ======= Attempt to evaluate the agent's edits ======= + # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) + # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. + + if state is None: + raise ValueError('State should not be None.') - # get the last message or thought from the agent - for event in state.history.get_events(reverse=True): - if isinstance(event, CmdRunAction) and event.source == 'agent': + model_answer_raw = '' + # get the last message or thought from the agent + for event in state.history.get_events(reverse=True): + if event.source == 'agent': + if isinstance(event, AgentFinishAction): + model_answer_raw = event.thought + break + elif isinstance(event, CmdRunAction): model_answer_raw = event.thought - elif isinstance(event, MessageAction) and event.source == 'agent': + break + elif isinstance(event, MessageAction): model_answer_raw = event.content + break - # attempt to parse model_answer - model_answer = re.findall(r'(.*?)', model_answer_raw) - if len(model_answer) == 0: - logger.warning(f'Failed to parse model answer: {model_answer_raw}') - model_answer = model_answer_raw - else: - model_answer = model_answer[0] + # attempt to parse model_answer + model_answer = re.findall(r'(.*?)', model_answer_raw) + if len(model_answer) == 0: + logger.warning(f'Failed to parse model answer: {model_answer_raw}') + model_answer = model_answer_raw + else: + model_answer = model_answer[0] - logger.info( - f'Final message: {model_answer} | Ground truth: {instance["Final answer"]}' - ) - score = question_scorer( - 
model_answer=model_answer, ground_truth=instance['Final answer'] - ) - test_result = { - 'score': score, - 'model_answer_raw': model_answer_raw, - 'model_answer': model_answer, - 'ground_truth': instance['Final answer'], - } - metrics = state.metrics.get() if state.metrics else None - - # history is now available as a stream of events, rather than list of pairs of (Action, Observation) - # for compatibility with the existing output format, we can remake the pairs here - # remove when it becomes unnecessary - histories = state.history.compatibility_for_eval_history_pairs() - - # Save the output - output = { - 'instance_id': instance['task_id'], - 'instance': instance, - 'instruction': instance['Question'], - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': test_result, - } - except Exception: - logger.error('Process instance failed') - raise - finally: - config.workspace_mount_path = old_workspace_mount_path + logger.info( + f'Final message: {model_answer} | Ground truth: {instance["Final answer"]}' + ) + score = question_scorer( + model_answer=model_answer, ground_truth=instance['Final answer'] + ) + test_result = { + 'score': score, + 'model_answer_raw': model_answer_raw, + 'model_answer': model_answer, + 'ground_truth': instance['Final answer'], + } + metrics = state.metrics.get() if state.metrics else None + + # history is now available as a stream of events, rather than list of pairs of (Action, Observation) + # for compatibility with the existing output format, we can remake the pairs here + # remove when it becomes unnecessary + histories = state.history.compatibility_for_eval_history_pairs() + + # Save the output + output = EvalOutput( + instance_id=instance['instance_id'], + instance=instance.to_dict(), + instruction=instance['Question'], + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and 
state.last_error else None, + test_result=test_result, + ) return output @@ -197,13 +220,19 @@ def process_instance( type=str, help='gaia level to evaluate, eg. 2023_level1', ) + parser.add_argument( + '--data-split', + type=str, + help='data split to evaluate, eg. test', + default='validation', + ) args, _ = parser.parse_known_args() - if args.directory: - config.workspace_base = os.path.abspath(args.directory) - logger.info(f'Setting workspace base to {config.workspace_base}') - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config=llm_config, @@ -222,20 +251,18 @@ def process_instance( repo_type='dataset', local_dir=DATASET_CACHE_DIR, ) - gaia_tests = dataset[metadata.data_split] + gaia_tests = dataset[metadata.data_split].to_pandas() + gaia_tests.rename(columns={'task_id': 'instance_id'}, inplace=True) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - prepared_dataset = prepare_dataset( - gaia_tests.to_pandas(), output_file, args.eval_n_limit, 'task_id' - ) - - agent = Agent.get_cls(args.agent_cls)(llm=LLM(config.llm)) + prepared_dataset = prepare_dataset(gaia_tests, output_file, args.eval_n_limit) - run_evaluation( - dataset=prepared_dataset, - metadata=metadata, - output_file=output_file, - num_workers=args.eval_num_workers, - process_instance_func=process_instance, - id_column='task_id', + asyncio.run( + run_evaluation( + dataset=prepared_dataset, + metadata=metadata, + output_file=output_file, + num_workers=args.eval_num_workers, + process_instance_func=process_instance, + ) ) diff --git a/evaluation/gaia/scripts/run_infer.sh b/evaluation/gaia/scripts/run_infer.sh old mode 100644 new mode 100755 diff --git 
a/evaluation/gorilla/README.md b/evaluation/gorilla/README.md index c5da3ad4531c..106a83a251b1 100644 --- a/evaluation/gorilla/README.md +++ b/evaluation/gorilla/README.md @@ -2,20 +2,16 @@ This folder contains evaluation harness we built on top of the original [Gorilla APIBench](https://github.com/ShishirPatil/gorilla) ([paper](https://arxiv.org/pdf/2305.15334)). -## Setup Environment +## Setup Environment and LLM Configuration -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local development environment for OpenDevin. - -## Configure OpenDevin and your LLM - -Run `make setup-config` to set up the `config.toml` file if it does not exist at the root of the workspace. +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Run Inference on APIBench Instances Make sure your Docker daemon is running, then run this bash script: ```bash -bash evaluation/gorilla/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] [hubs] +./evaluation/gorilla/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] [hubs] ``` where `model_config` is mandatory, while all other arguments are optional. 
@@ -39,5 +35,5 @@ Note: in order to use `eval_limit`, you must also set `agent`; in order to use ` For example, ```bash -bash evaluation/gorilla/scripts/run_infer.sh llm 0.6.2 CodeActAgent 10 th +./evaluation/gorilla/scripts/run_infer.sh llm 0.6.2 CodeActAgent 10 th ``` diff --git a/evaluation/gorilla/run_infer.py b/evaluation/gorilla/run_infer.py index ea68d445a109..d84432c3b0aa 100644 --- a/evaluation/gorilla/run_infer.py +++ b/evaluation/gorilla/run_infer.py @@ -1,59 +1,28 @@ import asyncio import json -import logging -import multiprocessing as mp import os -import pathlib -import subprocess -import time -from concurrent.futures import ProcessPoolExecutor -from tqdm import tqdm - -from opendevin.controller.agent import Agent +import pandas as pd + +from evaluation.gorilla.utils import encode_question, get_data_for_hub +from evaluation.utils.shared import ( + EvalMetadata, + EvalOutput, + codeact_user_response, + make_metadata, + prepare_dataset, + reset_logger_for_multiprocessing, + run_evaluation, +) from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.events.action import MessageAction -from opendevin.llm.llm import LLM - -from .utils import encode_question, get_data - -config = load_app_config() - - -def cleanup(): - print('Cleaning up child processes...') - for process in mp.active_children(): - print(f'Terminating child process: {process.name}') - process.terminate() - process.join() - - -def codeact_user_response(state: State) -> str: - msg = ( - #'Please continue working on the task on whatever approach you think is suitable.\n' - 'Please run the following command: exit .\n' - #'IMPORTANT: 
YOU SHOULD NEVER ASK FOR HUMAN HELP OR USE THE INTERNET TO SOLVE THIS TASK.\n' - ) - - # check if the agent has tried to talk to the user 3 times, if so, let the agent know it can give up - if state.history: - user_msgs = [ - event - for event in state.history.get_events() - if isinstance(event, MessageAction) and event.source == 'user' - ] - if len(user_msgs) > 2: - # let the agent know that it can give up when it has tried 3 times - return ( - msg - + 'If you want to give up, run: exit .\n' - ) - return msg - +from opendevin.core.main import create_runtime, run_controller AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { 'CodeActAgent': codeact_user_response, @@ -64,105 +33,95 @@ def codeact_user_response(state: State) -> str: } -def process_instance(agent, question_id, question, metadata, reset_logger: bool = True): - # create process-specific workspace dir - # we will create a workspace directory for EACH process - # so that different agent don't interfere with each other. - old_workspace_mount_path = config.workspace_mount_path - try: - workspace_mount_path = os.path.join( - config.workspace_mount_path, '_eval_workspace' - ) - workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - config.workspace_mount_path = workspace_mount_path - - # Setup the logger properly, so you can run multi-processing to parallize the evaluation - eval_output_dir = metadata['eval_output_dir'] - if reset_logger: - # Set up logger - log_file = os.path.join( - eval_output_dir, 'logs', f'instance_{question_id}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {question_id}.\nLOG: tail -f {log_file}' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - 
logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') - - # Prepare instruction - instruction = encode_question(question, metadata['hub']) - instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n' - # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] - # logger.info(f'Instruction:\n{instruction}', extra={'msg_type': 'OBSERVATION'}) - - # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( - agent.__class__.__name__ - ), - sid=question_id, - ) - ) - # ======= Attempt to evaluate the agent's edits ======= - # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) - # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. 
- - if state is None: - raise ValueError('State should not be None.') - - # retrieve the last message from the agent - model_answer_raw = state.history.get_last_agent_message() - - # attempt to parse model_answer - _, _, ast_eval = get_data(metadata['hub']) - correct, hallucination = ast_eval(question_id, model_answer_raw) - metrics = state.metrics.get() if state.metrics else None - logger.info( - f'Final message: {model_answer_raw} | Correctness: {correct} | Hallucination: {hallucination}' - ) - - # history is now available as a stream of events, rather than list of pairs of (Action, Observation) - # for compatibility with the existing output format, we can remake the pairs here - # remove when it becomes unnecessary - histories = state.history.compatibility_for_eval_history_pairs() +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def process_instance( + instance: pd.Series, + metadata: EvalMetadata, + reset_logger: bool = True, +) -> EvalOutput: + config = get_config(metadata) + instance_id = instance['question_id'] + question = instance['question'] + + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance_id, log_dir) + else: + logger.info(f'Starting evaluation for instance {instance_id}.') + + # Prepare instruction + instruction = encode_question(question, instance['hub']) + instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you 
AND NEVER ASK FOR HUMAN HELP.\n' + # NOTE: You can actually set slightly different instruction for different agents + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] + # logger.info(f'Instruction:\n{instruction}', extra={'msg_type': 'OBSERVATION'}) + + # Here's how you can run the agent (similar to the `main` function) and get the final task state + runtime = await create_runtime(config, sid=instance_id) + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( + metadata.agent_class + ), + ) + # ======= Attempt to evaluate the agent's edits ======= + # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) + # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. + + if state is None: + raise ValueError('State should not be None.') + + # retrieve the last message from the agent + model_answer_raw = state.history.get_last_agent_message() + + # attempt to parse model_answer + ast_eval_fn = instance['ast_eval'] + correct, hallucination = ast_eval_fn(instance_id, model_answer_raw) + metrics = state.metrics.get() if state.metrics else None + logger.info( + f'Final message: {model_answer_raw} | Correctness: {correct} | Hallucination: {hallucination}' + ) - # Save the output - output = { - 'question_id': question_id, + # history is now available as a stream of events, rather than list of pairs of (Action, Observation) + # for compatibility with the existing output format, we can remake the pairs here + # remove when it becomes unnecessary + histories = state.history.compatibility_for_eval_history_pairs() + + output = EvalOutput( + instance_id=instance_id, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result={ 'text': model_answer_raw, 'correct': correct, 
'hallucination': hallucination, - 'answer_id': 'None', - 'model_id': metadata['model_name'], - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - } - except Exception: - logger.error('Process instance failed') - raise - finally: - config.workspace_mount_path = old_workspace_mount_path + }, + ) return output @@ -175,188 +134,62 @@ def process_instance(agent, question_id, question, metadata, reset_logger: bool default='hf,torch,tf', ) args, _ = parser.parse_known_args() - if args.directory: - config.workspace_base = os.path.abspath(args.directory) - print(f'Setting workspace base to {config.workspace_base}') - # Check https://github.com/OpenDevin/OpenDevin/blob/main/evaluation/swe_bench/README.md#configure-opendevin-and-your-llm - # for details of how to set `llm_config` + llm_config = None if args.llm_config: - specified_llm_config = get_llm_config_arg(args.llm_config) - if specified_llm_config: - config.llm = specified_llm_config - logger.info(f'Config for evaluation: {config}') - agent_class = args.agent_cls - assert ( - agent_class in AGENT_CLS_TO_FAKE_USER_RESPONSE_FN - ), f'Unsupported agent class: {agent_class}' - model_name = config.llm.model.split('/')[-1] - max_iterations = args.max_iterations - eval_note = '' - if args.eval_note is not None: - eval_note += '_N_' + args.eval_note - eval_output_dir = os.path.join( - args.eval_output_dir, - 'gorilla', - agent_class, - model_name + '_maxiter_' + str(max_iterations) + eval_note, - ) - pathlib.Path(eval_output_dir).mkdir(parents=True, exist_ok=True) - pathlib.Path(os.path.join(eval_output_dir, 'logs')).mkdir( - parents=True, exist_ok=True - ) - logger.info(f'Using evaluation output directory: {eval_output_dir}') + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') - hubs = [] - if 'hf' in args.hubs: - 
hubs.append('hf') - if 'torch' in args.hubs or 'th' in args.hubs: - hubs.append('torch') - if 'tf' in args.hubs: - hubs.append('tf') - if hubs == []: + hubs = args.hubs.split(',') + if len(hubs) == 0: raise ValueError('Please choose at least one from hf, torch, and tf for hubs.') + dfs = [] for hub in hubs: logger.info(f'Evaluating APIBench {hub} test') - questions, question_ids, ast_eval = get_data(hub) - - # TEST METADATA - metadata = { - 'hub': hub, - 'agent_class': agent_class, - 'model_name': model_name, - 'max_iterations': max_iterations, - 'eval_output_dir': eval_output_dir, - 'start_time': time.strftime('%Y-%m-%d %H:%M:%S'), - # get the commit id of current repo for reproduciblity - 'git_commit': subprocess.check_output(['git', 'rev-parse', 'HEAD']) - .decode('utf-8') - .strip(), - } - logger.info(f'Metadata: {metadata}') - with open(os.path.join(eval_output_dir, f'metadata_{hub}.json'), 'w') as f: - json.dump(metadata, f) + df = get_data_for_hub(hub) + dfs.append(df) + dataset_df = pd.concat(dfs) + dataset_df.rename(columns={'question_id': 'instance_id'}, inplace=True) + + metadata = make_metadata( + llm_config=llm_config, + dataset_name=f'gorilla-{hub}', + agent_class=args.agent_cls, + max_iterations=args.max_iterations, + eval_note=args.eval_note, + eval_output_dir=args.eval_output_dir, + data_split=args.data_split, + ) + output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - # LIMIT EVALUATION - eval_n_limit = args.eval_n_limit - if eval_n_limit: - questions = questions[: (eval_n_limit // len(hubs))] - question_ids = question_ids[: (eval_n_limit // len(hubs))] - logger.info( - f'Limiting evaluation to a total of first {eval_n_limit} instances -> first {eval_n_limit//len(hubs)} instances per hub.' 
- ) - output_file = os.path.join(eval_output_dir, f'output_{model_name}_{hub}.jsonl') - logger.info(f'Writing evaluation output to {output_file}') - finished_task_ids = set() - if os.path.exists(output_file): - with open(output_file, 'r') as f: - for line in f: - data = json.loads(line) - for i in range(len(question_ids)): - if question_ids[i] == int(data['question_id']): - finished_task_ids.add(data['question_id']) - logger.warning( - f'Output file {output_file} already exists. Loaded {len(finished_task_ids)} finished instances.' - ) - output_fp = open(output_file, 'a') - logger.info( - f'Evaluation started with Agent {agent_class}, model {model_name}, max iterations {max_iterations}.' - ) - # ============================================= - # filter out finished instances - new_questions = [] - new_question_ids = [] - for i in range(len(question_ids)): - if question_ids[i] in finished_task_ids: - logger.info( - f'Skipping instance {question_ids[i]} as it is already finished.' - ) - continue - new_questions.append(questions[i]) - new_question_ids.append(question_ids[i]) + dataset = prepare_dataset( + dataset_df, output_file=output_file, eval_n_limit=args.eval_n_limit + ) - finished_task_number = len(finished_task_ids) - questions = new_questions - question_ids = new_question_ids - logger.info( - f'Finished instances: {finished_task_number}, Remaining instances: {len(question_ids)}' + asyncio.run( + run_evaluation( + dataset=dataset, + metadata=metadata, + output_file=output_file, + num_workers=args.eval_num_workers, + process_instance_func=process_instance, ) - # ============================================= - pbar = tqdm(total=len(question_ids)) - - # This function tracks the progress AND write the output to a JSONL file - def update_progress(future, pbar, output_fp, finished_task_ids): - pbar.update(1) - output = future.result() - pbar.set_description(f'Instance {output["question_id"]}') - pbar.set_postfix_str(f'Test Result: {output["correct"]}') - logger.info( - 
f'Finished evaluation for instance {output["question_id"]}: {output["correct"]}' - ) - output_fp.write(json.dumps(output) + '\n') - output_fp.flush() - finished_task_ids.add(output['question_id']) - - # Create the agent - agent = Agent.get_cls(agent_class)(llm=LLM(config.llm)) - - # This sets the multi-processing - num_workers = args.eval_num_workers - logger.info(f'Using {num_workers} workers for evaluation.') - try: - with ProcessPoolExecutor(num_workers) as executor: - futures = [] - # This is how we perform multi-processing - for i in range(len(question_ids)): - try: - question_id = question_ids[i] - question = questions[i] - future = executor.submit( - process_instance, - agent, - question_id, - question, - metadata, - reset_logger=bool(num_workers > 1), - ) - future.add_done_callback( - update_progress, pbar, output_fp, finished_task_ids - ) - futures.append(future) - except Exception: - continue - - # Wait for all futures to complete - for future in futures: - try: - future.result() - except Exception: - continue - except KeyboardInterrupt: - logger.info('KeyboardInterrupt received. Cleaning up...') - cleanup() - - output_fp.close() - total_correct = 0 - total_hallucination = 0 - output = [] - with open(output_file, 'r') as f: - for line in f: - data = json.loads(line) - output.append(data) - if int(data['question_id']) in finished_task_ids: - if str(data['correct']).lower() == 'true': - total_correct += 1 - if str(data['hallucination']).lower() == 'true': - total_hallucination += 1 - # sort all output by question_id - output = sorted(output, key=lambda x: x['question_id']) - with open(output_file, 'w') as f: - for dat in output: - f.write(json.dumps(dat) + '\n') - f.flush() + ) - logger.info( - f'Evaluation finished for {hub}. Total: {len(question_ids)+finished_task_number}; Correct: {total_correct}; Hallucination: {total_hallucination}. 
Accuracy: {total_correct / (len(question_ids)+finished_task_number)}' - ) + # Read the output file and calculate the accuracy + total_correct = 0 + total_hallucination = 0 + output = [] + with open(output_file, 'r') as f: + for line in f: + data = json.loads(line) + if data['test_result']['correct']: + total_correct += 1 + if data['test_result']['hallucination']: + total_hallucination += 1 + output.append(data) + logger.info( + f'Evaluation finished for {hub}. Total: {len(output)}; Correct: {total_correct}; Hallucination: {total_hallucination}. Accuracy: {total_correct / len(output)}' + ) diff --git a/evaluation/gorilla/scripts/run_infer.sh b/evaluation/gorilla/scripts/run_infer.sh old mode 100644 new mode 100755 diff --git a/evaluation/gorilla/utils.py b/evaluation/gorilla/utils.py index 66f878976b03..8c45cce58afb 100644 --- a/evaluation/gorilla/utils.py +++ b/evaluation/gorilla/utils.py @@ -1,6 +1,8 @@ import json +import os from functools import partial +import pandas as pd import requests from ast_eval_hf import ast_eval_hf, ast_parse from ast_eval_tf import ast_eval_tf @@ -48,48 +50,59 @@ def encode_question(question, api_name): return prompts -def get_data(hub): +DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') +os.makedirs(DATA_DIR, exist_ok=True) + + +def fetch_data(url, filename): + cache_path = os.path.join(DATA_DIR, filename) + if os.path.exists(cache_path): + with open(cache_path, 'r') as f: + return f.read() + else: + response = requests.get(url) + if response.status_code == 200: + with open(cache_path, 'w') as f: + f.write(response.text) + return response.text + else: + raise Exception(f'Failed to fetch data from {url}') + + +def get_data_for_hub(hub: str): if hub == 'hf': question_data = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/eval/eval-data/questions/huggingface/questions_huggingface_0_shot.jsonl' api_dataset = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/api/huggingface_api.jsonl' apibench = 
'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/apibench/huggingface_eval.json' ast_eval = ast_eval_hf - if hub == 'torch': + elif hub == 'torch': question_data = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/eval/eval-data/questions/torchhub/questions_torchhub_0_shot.jsonl' api_dataset = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/api/torchhub_api.jsonl' apibench = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/apibench/torchhub_eval.json' ast_eval = ast_eval_th - if hub == 'tf': + elif hub == 'tf': question_data = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/eval/eval-data/questions/tensorflowhub/questions_tensorflowhub_0_shot.jsonl' api_dataset = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/api/tensorflowhub_api.jsonl' apibench = 'https://raw.githubusercontent.com/ShishirPatil/gorilla/main/data/apibench/tensorflow_eval.json' ast_eval = ast_eval_tf - # get questions and question_ids + question_data = fetch_data(question_data, 'question_data.jsonl') + api_dataset = fetch_data(api_dataset, 'api_dataset.jsonl') + apibench = fetch_data(apibench, 'apibench.json') + + # Parse question data questions = [] question_ids = [] - question_data = requests.get(question_data) - if question_data.status_code == 200: - lines = question_data.text.splitlines() - for line in lines: - questions.append(json.loads(line)['text']) - question_ids.append(json.loads(line)['question_id']) - - # get the api datasest - api_database = [] - api_dataset = requests.get(api_dataset) - if api_dataset.status_code == 200: - lines = api_dataset.text.splitlines() - for line in lines: - api_database.append(json.loads(line)) - - # get the question answer pair datasest - qa_pairs = [] - apibench = requests.get(apibench) - if apibench.status_code == 200: - lines = apibench.text.splitlines() - for line in lines: - qa_pairs.append(json.loads(line)['api_data']) + for line in question_data.splitlines(): 
+ data = json.loads(line) + questions.append(data['text']) + question_ids.append(data['question_id']) + + # Parse API dataset + api_database = [json.loads(line) for line in api_dataset.splitlines()] + + # Parse question-answer pairs + qa_pairs = [json.loads(line)['api_data'] for line in apibench.splitlines()] # Parse all apis to ast trees ast_database = [] @@ -97,4 +110,15 @@ def get_data(hub): ast_tree = ast_parse(data['api_call']) ast_database.append(ast_tree) ast_eval = partial(ast_eval, api_database, qa_pairs, ast_database) - return questions, question_ids, ast_eval + + return pd.DataFrame( + { + 'question_id': question_ids, + 'question': questions, + 'api_database': [api_database] * len(questions), + 'qa_pairs': [qa_pairs] * len(questions), + 'ast_database': [ast_database] * len(questions), + 'ast_eval': [ast_eval] * len(questions), + 'hub': [hub] * len(questions), + } + ) diff --git a/evaluation/gpqa/README.md b/evaluation/gpqa/README.md index b8249be56d66..7e1981c1f0d7 100644 --- a/evaluation/gpqa/README.md +++ b/evaluation/gpqa/README.md @@ -15,31 +15,9 @@ Further references: - https://paperswithcode.com/dataset/gpqa - https://github.com/idavidrein/gpqa +## Setup Environment and LLM Configuration -## Setup Environment - -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local develop environment for OpenDevin. - - -## Configure OpenDevin and your LLM - -Create a `config.toml` file (you can copy from `config.template.toml`) if it does not exist at the root of the workspace. 
- -Add the following configurations: - -```toml -# TODO: Change these to the model you want to evaluate -[llm.eval_gpt4_1106_preview] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[llm.eval_azure_openai_compatible_model] -model = "AZURE_OPENAI_EXACT_DEPLOYMENT_MODEL_NAME" -base_url = "AZURE_OPENAI_ENDPOINT" -api_key = "AZURE_ENDPOINT_API_KEY" -temperature = 0.0 -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Run Inference on GPQA Benchmark 'gpqa_main', 'gqpa_diamond', 'gpqa_experts', 'gpqa_extended' -- data split options @@ -55,8 +33,3 @@ like to evaluate. It could also be a release tag like `0.6.2`. - `num_samples_eval`: Number of samples to evaluate (useful for testing and debugging). - `data_split`: The data split to evaluate on. Must be one of `gpqa_main`, `gqpa_diamond`, `gpqa_experts`, `gpqa_extended`. Defaults to `gpqa_diamond` as done in the paper. - `AgentClass`: The agent class to use for evaluation. Currently only supports `CodeActAgent` for CodeActAgent. 
- - -## Benchmark Evaluation Results - -- [] TODO: Finish the evaluation run across the entire benchmark and compile results diff --git a/evaluation/gpqa/run_infer.py b/evaluation/gpqa/run_infer.py index 9cc8751f3e33..8f2df78e5e16 100644 --- a/evaluation/gpqa/run_infer.py +++ b/evaluation/gpqa/run_infer.py @@ -17,9 +17,7 @@ """ import asyncio -import logging import os -import pathlib import random import re from typing import Callable @@ -29,22 +27,27 @@ from evaluation.utils.shared import ( EvalMetadata, - codeact_user_response, + EvalOutput, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.events.action import Action, AgentFinishAction, MessageAction +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import ( + Action, + AgentFinishAction, + MessageAction, +) from opendevin.events.observation import Observation -from opendevin.llm.llm import LLM - -config = load_app_config() ACTION_FORMAT = """ < AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + def gpqa_codeact_user_response( state: State, encapsulate_solution: bool = False, @@ -68,11 +92,10 @@ 
def gpqa_codeact_user_response( ' exit \n' 'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP TO SOLVE THIS TASK.\n' ) - return msg -AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {'CodeActAgent': codeact_user_response} +AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {'CodeActAgent': gpqa_codeact_user_response} AGENT_CLS_TO_INST_SUFFIX = { 'CodeActAgent': '\n\n SUPER IMPORTANT: When you think you have solved the question, first report it back to the user in the requested format. Only once that is done, in the next turn, please run the following command: exit .\n' @@ -146,57 +169,23 @@ def convert_instance_dict(instance): return out_instance_dict -def process_instance( +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, ): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - old_workspace_mount_path = config.workspace_mount_path - old_workspace_base = config.workspace_base - try: - workspace_mount_path = os.path.join( - config.workspace_mount_path, '_eval_workspace' - ) - # create process-specific workspace dir - workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - - # reset workspace to config - config.workspace_base = workspace_mount_path - config.workspace_mount_path = workspace_mount_path - - # Setup the logger properly, so you can run multi-processing to parallelize the evaluation - if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, 'logs', f'instance_{instance.instance_id}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance.instance_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell' - ) - # Remove all 
existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - else: - logger.info(f'Starting evaluation for instance {instance.instance_id}.') - - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') - - # ======= Run the agent on the instance ======= - # Prepare instruction for the agent using suggested format in gpqa codebase - instruction = f""" + config = get_config(metadata) + + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance['instance_id'], log_dir) + else: + logger.info(f'Starting evaluation for instance {instance["instance_id"]}.') + + # ======= Run the agent on the instance ======= + # Prepare instruction for the agent using suggested format in gpqa codebase + instruction = f""" What is the correct answer to this question:\n {instance['question']}\n @@ -225,109 +214,98 @@ def process_instance( Ok now its time to start solving the question. Good luck! """ - # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( - agent.__class__.__name__ - ), - sid=f'gptq_{str(instance.instance_id)}', - ) - ) - assert state is not None, 'State should not be None.' 
- - # ======= Attempt to evaluate the agent's edits ======= - - question_choices = { - 'A': instance['choices'][0], - 'B': instance['choices'][1], - 'C': instance['choices'][2], - 'D': instance['choices'][3], - } - # get the final message from the state history (default to empty if not found) - found_answers = { - 'A': False, - 'B': False, - 'C': False, - 'D': False, - } - for event in state.history.get_events(reverse=True): - if ( - isinstance(event, AgentFinishAction) - and event.source != 'user' - and '< 0: - _selected = random.choice(found_options) - # if the final message is None, then the agent did not report the answer in the correct format - # so we randomly select one of the found options and compare it with the correct solution - test_result = _selected == instance.correct_solution - logger.info('#############################################') - logger.info('Agent did not report the answer in the correct format.') - logger.info(f'Found options: {found_options}') - logger.info(f'Selected option: {_selected}') - logger.info('#############################################') + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( + metadata.agent_class + ), + ) + assert state is not None, 'State should not be None.' 
+ + # ======= Attempt to evaluate the agent's edits ======= + + question_choices = { + 'A': instance['choices'][0], + 'B': instance['choices'][1], + 'C': instance['choices'][2], + 'D': instance['choices'][3], + } + # get the final message from the state history (default to empty if not found) + found_answers = { + 'A': False, + 'B': False, + 'C': False, + 'D': False, + } + for event in state.history.get_events(reverse=True): + if ( + isinstance(event, AgentFinishAction) + and event.source != 'user' + and '< 0: + _selected = random.choice(found_options) + # if the final message is None, then the agent did not report the answer in the correct format + # so we randomly select one of the found options and compare it with the correct solution + test_result = _selected == instance.correct_solution logger.info('#############################################') - logger.info(f'Test result: {test_result}') + logger.info('Agent did not report the answer in the correct format.') + logger.info(f'Found options: {found_options}') + logger.info(f'Selected option: {_selected}') logger.info('#############################################') - # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) - # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. 
- if state is None: - raise ValueError('State should not be None.') - - metrics = state.metrics.get() if state.metrics else None - - # Save the output - output = { - 'task_id': instance.task_id, - 'instance_id': instance.instance_id, - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': state.history.compatibility_for_eval_history_pairs(), - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': { - 'result': test_result, - 'found_answers': found_answers, - 'last_message': final_message, - }, - } - - except Exception: - logger.error('Process instance failed') - raise - finally: - config.workspace_mount_path = old_workspace_mount_path - config.workspace_base = old_workspace_base + logger.info('#############################################') + logger.info(f'Test result: {test_result}') + logger.info('#############################################') + + # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) + # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. 
+ if state is None: + raise ValueError('State should not be None.') + + metrics = state.metrics.get() if state.metrics else None + + # Save the output + output = EvalOutput( + instance_id=str(instance.instance_id), + instruction=instruction, + metadata=metadata, + history=state.history.compatibility_for_eval_history_pairs(), + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result={ + 'result': test_result, + 'found_answers': found_answers, + 'last_message': final_message, + }, + ) return output @@ -343,8 +321,11 @@ def process_instance( ) args, _ = parser.parse_known_args() - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing # so we don't need to manage file uploading to OpenDevin's repo @@ -355,8 +336,6 @@ def process_instance( gpqa_dataset = gpqa_dataset.to_pandas() # Add a new column 'instance_id' with the index gpqa_dataset['instance_id'] = gpqa_dataset.index - gpqa_dataset['task_id'] = gpqa_dataset.index - # gpqa_dataset = dataset['train'].to_pandas().sort_values(by='id').reset_index(drop=True) if args.agent_cls != 'CodeActAgent': raise ValueError( @@ -374,15 +353,14 @@ def process_instance( ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - prepared_dataset = prepare_dataset( - gpqa_dataset, output_file, args.eval_n_limit, 'task_id' - ) - - run_evaluation( - dataset=prepared_dataset, - metadata=metadata, - output_file=output_file, - num_workers=args.eval_num_workers, - process_instance_func=process_instance, - id_column='task_id', + prepared_dataset = prepare_dataset(gpqa_dataset, output_file, args.eval_n_limit) + + 
asyncio.run( + run_evaluation( + dataset=prepared_dataset, + metadata=metadata, + output_file=output_file, + num_workers=args.eval_num_workers, + process_instance_func=process_instance, + ) ) diff --git a/evaluation/humanevalfix/README.md b/evaluation/humanevalfix/README.md index d231be1a6166..5a056a54da87 100644 --- a/evaluation/humanevalfix/README.md +++ b/evaluation/humanevalfix/README.md @@ -1,39 +1,10 @@ # HumanEvalFix Evaluation with OpenDevin -Implements evaluation of agents on HumanEvalFix from the HumanEvalPack benchmark introduced in [OctoPack: Instruction Tuning Code Large Language Models](https://arxiv.org/abs/2308.07124). Please see [here](https://github.com/bigcode-project/bigcode-evaluation-harness/blob/main/bigcode_eval/tasks/humanevalpack.py) for the reference implementation used in the paper. +Implements evaluation of agents on HumanEvalFix from the HumanEvalPack benchmark introduced in [OctoPack: Instruction Tuning Code Large Language Models](https://arxiv.org/abs/2308.07124). Please see [here](https://github.com/bigcode-project/bigcode-evaluation-harness/blob/main/bigcode_eval/tasks/humanevalpack.py) for the reference implementation used in the paper. Currently only `python` evaluation is supported. -## Setup Environment +## Setup Environment and LLM Configuration -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local develop environment for OpenDevin. - - -## Configure OpenDevin and your LLM - -Create a `config.toml` file if it does not exist at the root of the workspace. 
- -Add the following configurations: - -```toml -[core] -max_iterations = 100 -cache_dir = "/tmp/cache" -ssh_hostname = "localhost" - -[sandbox] -enable_auto_lint = true - -# TODO: Change these to the model you want to evaluate -[llm.eval_gpt4_1106_preview] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[llm.eval_some_openai_compatible_model] -model = "openai/MODEL_NAME" -base_url = "https://OPENAI_COMPATIBLE_URL/v1" -api_key = "XXX" -temperature = 0.0 -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Run Inference on HumanEvalFix diff --git a/evaluation/humanevalfix/run_infer.py b/evaluation/humanevalfix/run_infer.py index fa683966339f..8fc895042820 100644 --- a/evaluation/humanevalfix/run_infer.py +++ b/evaluation/humanevalfix/run_infer.py @@ -9,9 +9,9 @@ """ import asyncio -import logging import os -import pathlib +import tempfile +from typing import Any import pandas as pd from datasets import load_dataset @@ -19,20 +19,25 @@ from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, codeact_user_response, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, load_app_config, parse_arguments -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import CmdRunAction +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime IMPORT_HELPER = { 'python': [ @@ 
-72,19 +77,105 @@ } -def get_test_result(instance, path, language='python', timeout=10): - # Evaluation reference: https://github.com/bigcode-project/bigcode-evaluation-harness/blob/84b96da31b7f840b55c5733325346176140cdb6b/bigcode_eval/tasks/humanevalpack.py#L347 +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +def _get_instance_id(instance: pd.Series) -> str: + return instance.task_id.replace('/', '__') + + +async def initialize_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required +): + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = CmdRunAction(command='cd /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + problem_statement = ( + instance.declaration + instance.buggy_solution + '\n' + instance.test + ) + filename = f'{_get_instance_id(instance)}.py' + with tempfile.TemporaryDirectory() as tmpdir: + host_script_path = os.path.join(tmpdir, filename) + with open(host_script_path, 'w') as f: + f.write(problem_statement) + await runtime.copy_to( + host_script_path, + '/workspace', + ) + + # check file exists + action = CmdRunAction(command=f'ls /workspace/{_get_instance_id(instance)}.py') + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + + +async def complete_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required, but it is used to get the workspace_dir_name +) -> dict[str, Any]: + """Complete the runtime for the agent. + + This function is called before the runtime is used to run the agent. + If you need to do something in the sandbox to get the correctness metric after + the agent has run, modify this function. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Completion Fn {'-' * 50}") + obs: CmdOutputObservation + + # default value + language = 'python' + timeout = 10 + test_result = {'result': {}, 'metadata': {}} code_metric = load('Muennighoff/code_eval_octopack') timeout = LANGUAGE_TO_TIMEOUT[language] num_workers = LANGUAGE_TO_NUM_WORKERS[language] python_imports = '\n'.join(IMPORT_HELPER[language]) - # Load function from path - with open(path, 'r') as f: - function = f.read() + action = CmdRunAction( + command=f'cat /workspace/{_get_instance_id(instance)}.py', keep_prompt=False + ) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 - function = [[python_imports + '\n' + function.strip()]] + function = obs.content.replace('\r\n', '\n') + logger.info(f'Function: {function}') + function = [[python_imports + '\n' + function]] results, logs = code_metric.compute( references=[instance.test], @@ -99,129 +190,79 @@ def get_test_result(instance, path, language='python', timeout=10): 'timeout': timeout, 'num_workers': num_workers, } + logger.info(f"{'-' * 50} END Runtime Completion Fn {'-' * 50}") return test_result -def process_instance( +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, -): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - old_workspace_mount_path = config.workspace_mount_path - old_workspace_base = config.workspace_base - - try: - workspace_mount_path = os.path.join( - config.workspace_mount_path, '_eval_workspace' - ) - # create process-specific workspace dir - workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - - # reset workspace to config - config.workspace_base = workspace_mount_path - config.workspace_mount_path = workspace_mount_path - - # use a session id for concurrent evaluation - sid = instance.task_id.replace('/', '__') - - # 
Setup the logger properly, so you can run multi-processing to parallelize the evaluation - if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, - 'logs', - f'instance_{sid}.log', - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance.task_id}.\nLOG: tail -f {log_file}' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') - - # Create file with HumanEvalFix problem - # Prompt reference: https://github.com/bigcode-project/bigcode-evaluation-harness/blob/84b96da31b7f840b55c5733325346176140cdb6b/bigcode_eval/tasks/humanevalpack.py#L509 - problem_statement = ( - instance.declaration + instance.buggy_solution + '\n' + instance.test - ) - path = os.path.join(workspace_mount_path, f'{sid}.py') - with open(path, 'w') as f: - f.write(problem_statement) +) -> EvalOutput: + config = get_config(metadata) + # use a session id for concurrent evaluation + sid = _get_instance_id(instance) + + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance.task_id, log_dir) + else: + logger.info(f'Starting evaluation for instance {instance.task_id}.') + + # Create file with HumanEvalFix problem + # Prompt reference: 
https://github.com/bigcode-project/bigcode-evaluation-harness/blob/84b96da31b7f840b55c5733325346176140cdb6b/bigcode_eval/tasks/humanevalpack.py#L509 + problem_statement = ( + instance.declaration + instance.buggy_solution + '\n' + instance.test + ) - # Prepare instruction - instruction = ( - f'Please fix the function in {instance.task_id.replace("/", "__")}.py such that all test cases pass.\n' - 'Environment has been set up for you to start working. You may assume all necessary tools are installed.\n\n' - '# Problem Statement\n' - f'{problem_statement}\n\n' - ) - instruction += ( - 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n' - 'You should NOT modify any existing test case files. If needed, you can add new test cases in a NEW file to reproduce the issue.\n' - 'You SHOULD INCLUDE PROPER INDENTATION in your edit commands.\n' - ) - # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] - - # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( - agent.__class__.__name__ - ), - sid=sid, - ) - ) + # Prepare instruction + instruction = ( + f'Please fix the function in {sid}.py such that all test cases pass.\n' + 'Environment has been set up for you to start working. You may assume all necessary tools are installed.\n\n' + '# Problem Statement\n' + f'{problem_statement}\n\n' + ) + instruction += ( + 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n' + 'You should NOT modify any existing test case files. 
If needed, you can add new test cases in a NEW file to reproduce the issue.\n' + 'You SHOULD INCLUDE PROPER INDENTATION in your edit commands.\n' + ) + # NOTE: You can actually set slightly different instruction for different agents + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] + + # Here's how you can run the agent (similar to the `main` function) and get the final task state + runtime = await create_runtime(config, sid=sid) + await initialize_runtime(runtime, instance) + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( + metadata.agent_class + ), + ) - # ======= Attempt to evaluate the agent's edits ======= - test_result = get_test_result(instance, path) - - # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) - # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. 
- if state is None: - raise ValueError('State should not be None.') - metrics = state.metrics.get() if state.metrics else None - - # history is now available as a stream of events, rather than list of pairs of (Action, Observation) - # for compatibility with the existing output format, we can remake the pairs here - # remove when it becomes unnecessary - histories = state.history.compatibility_for_eval_history_pairs() - - # Save the output - output = { - 'task_id': instance.task_id, - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': test_result, - } - except Exception: - logger.error('Process instance failed') - raise - finally: - config.workspace_mount_path = old_workspace_mount_path - config.workspace_base = old_workspace_base + if state is None: + raise ValueError('State should not be None.') + metrics = state.metrics.get() if state.metrics else None + test_result = await complete_runtime(runtime, instance) + + # history is now available as a stream of events, rather than list of pairs of (Action, Observation) + # for compatibility with the existing output format, we can remake the pairs here + # remove when it becomes unnecessary + histories = state.history.compatibility_for_eval_history_pairs() + + # Save the output + output = EvalOutput( + instance_id=instance.task_id, + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result=test_result, + ) return output @@ -234,28 +275,31 @@ def process_instance( 'bigcode/humanevalpack', 'python' ) # TODO: Support other languages hefix_tests = dataset['test'].to_pandas() + hefix_tests.rename(columns={'task_id': 'instance_id'}, inplace=True) - id_column = 'task_id' - - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - 
logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, - args.dataset_name, + 'humanevalfix-python', args.agent_cls, args.max_iterations, args.eval_note, args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column) - - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(hefix_tests, output_file, args.eval_n_limit) + + asyncio.run( + run_evaluation( + instances, + metadata, + output_file, + args.eval_num_workers, + process_instance, + ) ) diff --git a/evaluation/humanevalfix/scripts/run_infer.sh b/evaluation/humanevalfix/scripts/run_infer.sh old mode 100644 new mode 100755 diff --git a/evaluation/logic_reasoning/Dockerfile b/evaluation/logic_reasoning/Dockerfile new file mode 100644 index 000000000000..0730c2e36d0d --- /dev/null +++ b/evaluation/logic_reasoning/Dockerfile @@ -0,0 +1,7 @@ +FROM ubuntu:22.04 + +RUN apt-get update && apt-get install -y python3 python3-pip + +RUN pip install scitools-pyke + +# docker build -t xingyaoww/od_logic_reasoning . diff --git a/evaluation/logic_reasoning/README.md b/evaluation/logic_reasoning/README.md index c0e313cf8beb..79faae4fe071 100644 --- a/evaluation/logic_reasoning/README.md +++ b/evaluation/logic_reasoning/README.md @@ -2,38 +2,13 @@ This folder contains evaluation harness for evaluating agents on the logic reasoning benchmark [ProntoQA](https://github.com/asaparov/prontoqa) and [ProofWriter](https://allenai.org/data/proofwriter). -## Configure OpenDevin and your LLM +## Setup Environment and LLM Configuration -Create a `config.toml` file if it does not exist at the root of the workspace. 
- -Add the following configurations: - -```toml -[core] -max_iterations = 100 -cache_dir = "/tmp/cache" -ssh_hostname = "localhost" - -[sandbox] -enable_auto_lint = true - -# TODO: Change these to the model you want to evaluate -[llm.eval_gpt4_1106_preview_llm] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[llm.eval_some_openai_compatible_model_llm] -model = "openai/MODEL_NAME" -base_url = "https://OPENAI_COMPATIBLE_URL/v1" -api_key = "XXX" -temperature = 0.0 -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Run Inference on logic_reasoning -The following code will run inference on the first example of the ProntoQA dataset, -using OpenDevin 0.6.2 version. +The following code will run inference on the first example of the ProofWriter dataset, ```bash -./evaluation/logic_reasoning/scripts/run_infer.sh ProntoQA eval_gpt4_1106_preview_llm 0.6.2 1 +./evaluation/logic_reasoning/scripts/run_infer.sh eval_gpt4_1106_preview_llm ProofWriter ``` diff --git a/evaluation/logic_reasoning/instruction.txt b/evaluation/logic_reasoning/instruction.txt index bb49e883c7a3..2a9b16582e3a 100644 --- a/evaluation/logic_reasoning/instruction.txt +++ b/evaluation/logic_reasoning/instruction.txt @@ -3,12 +3,12 @@ you can interact with an interactive Python (Jupyter Notebook) environment and r In this task, you need to use the code in [[logic_inference_path.py]] to help you. Specifically, you first need to instantiate a **LogicInferenceEngine** class and use the **safe_execute_program** method to prove the **logic programs**. You should receive *answer*, *flag*, *error_message* from the output. 
An example would be look like this: - - import sys - sys.path.append(workspace_mount_path) - engine = LogicInferenceEngine(dataset_name, workspace_mount_path) - answer, flag, error_message = engine.safe_execute_program(logic_programs) - + +import sys +sys.path.append('/workspace') +engine = LogicInferenceEngine() +answer, flag, error_message = engine.safe_execute_program(logic_programs) + Please send the *answer* variable through message. diff --git a/evaluation/logic_reasoning/logic_inference.py b/evaluation/logic_reasoning/logic_inference.py index fd8404da10a7..fd6d52b4c6dd 100644 --- a/evaluation/logic_reasoning/logic_inference.py +++ b/evaluation/logic_reasoning/logic_inference.py @@ -191,9 +191,9 @@ def answer_map_proofwriter(self, result, value_to_check): class LogicInferenceEngine: - def __init__(self, dataset_name, workspace_mount_path): - self.dataset_name = dataset_name - self.workspace_mount_path = workspace_mount_path + def __init__(self): + self.dataset_name = os.environ.get('DATASET_NAME', 'ProofWriter') + self.workspace_mount_path = '/workspace' def random_backup(self): if self.dataset_name == 'ProntoQA': diff --git a/evaluation/logic_reasoning/run_infer.py b/evaluation/logic_reasoning/run_infer.py index f8a050bfd22e..d0739d4f39e0 100644 --- a/evaluation/logic_reasoning/run_infer.py +++ b/evaluation/logic_reasoning/run_infer.py @@ -1,29 +1,35 @@ import asyncio -import logging import os -import pathlib -import shutil import pandas as pd from datasets import load_dataset -from evaluation.swe_bench.swe_env_box import DockerSSHBox from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, codeact_user_response, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config -from opendevin.core.logger import get_console_handler +from 
opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import ( + AgentFinishAction, + CmdRunAction, + IPythonRunCellAction, + MessageAction, +) +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { 'CodeActAgent': codeact_user_response, @@ -34,6 +40,28 @@ } +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='xingyaoww/od-eval-logic-reasoning:v1.0', + enable_auto_lint=True, + use_host_network=False, + od_runtime_extra_deps='$OD_INTERPRETER_PATH -m pip install scitools-pyke', + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + def get_choice(answer_str): choices = [ 'A', @@ -83,7 +111,7 @@ def get_test_result( 'the correct answer is', 'The correct answer is', 'The correct option is', - 'Thus, the answer is', + 'the answer is', ] if prediction is None: for indicator in indicators: @@ -97,154 +125,143 @@ def get_test_result( return test_result -def process_instance( +CUR_EVAL_DIR = os.path.dirname(__file__) + + +async def initialize_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required +): + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + # Set instance id + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = CmdRunAction(command='cd /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + # copy logic_inference.py to /workspace + await runtime.copy_to( + os.path.join(CUR_EVAL_DIR, 'logic_inference.py'), '/workspace' + ) + # check if the file exists + obs = await runtime.run_action(CmdRunAction(command='ls /workspace')) + assert obs.exit_code == 0 + assert 'logic_inference.py' in obs.content + + await runtime.add_env_vars({'DATASET_NAME': metadata.dataset}) + + action = CmdRunAction(command='mkdir -p /workspace/.cache_program') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = IPythonRunCellAction(code='%pip install scitools-pyke') + logger.info(action, extra={'msg_type': 'ACTION'}) + ipynb_obs = await runtime.run_action(action) + logger.info(ipynb_obs, extra={'msg_type': 'OBSERVATION'}) + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + + +# Prepare instruction +with open(os.path.join(CUR_EVAL_DIR, 'instruction.txt'), 'r') as f: + INSTRUCTION_TEMPLATE = f.read() + + +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, ): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - old_workspace_mount_path = config.workspace_mount_path - old_workspace_base = config.workspace_base - - try: - workspace_mount_path = os.path.join( - config.workspace_mount_path, '_eval_workspace' - ) - # create process-specific workspace dir - workspace_mount_path = os.path.join(workspace_mount_path, 
str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - - # reset workspace to config - config.workspace_base = workspace_mount_path - config.workspace_mount_path = workspace_mount_path - - # Setup the logger properly, so you can run multi-processing to parallelize the evaluation - if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, 'logs', f'instance_{instance["id"]}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance["id"]}.\nLOG: tail -f {log_file}' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') - - # sandbox = DockerSSHBox() - logic_inference_path = os.path.join(workspace_mount_path, 'logic_inference.py') - if not os.path.exists(logic_inference_path): - shutil.copyfile( - './evaluation/logic_reasoning/logic_inference.py', logic_inference_path - ) - logger.info(f'logic_inference.py copied to {workspace_mount_path}') - - cache_dir = os.path.join(workspace_mount_path, '.cache_program') - if not os.path.exists(cache_dir): - os.makedirs(cache_dir) - - # Prepare instruction - - with open('./evaluation/logic_reasoning/instruction.txt', 'r') as f: - instruction = f.read() - - instance_logic_programs = instance['raw_logic_programs'][0].strip() - instruction = instruction.replace('[[dataset_name]]', dataset_name) - instruction = instruction.replace('[[logic_programs]]', instance_logic_programs) - instruction = instruction.replace( - 
'[[logic_inference_path.py]]', logic_inference_path - ) + config = get_config(metadata) + + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance['instance_id'], log_dir) + else: + logger.info(f'Starting evaluation for instance {instance["instance_id"]}.') + + instance_logic_programs = instance['raw_logic_programs'][0].strip() + instruction = ( + INSTRUCTION_TEMPLATE.replace('[[dataset_name]]', dataset_name) + .replace('[[logic_programs]]', instance_logic_programs) + .replace('[[logic_inference_path.py]]', '/workspace/logic_inference.py') + ) - # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] - - # use a session id for concurrent evaluation - sid = instance['id'] + '_' + str(os.getpid()) - sandbox = DockerSSHBox(sid=sid) - exit_code, command_output = sandbox.execute('pip install scitools-pyke') - - # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( - agent.__class__.__name__ - ), - sandbox=sandbox, - sid=sid, - ) - ) - # ======= Attempt to evaluate the agent's edits ======= - # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) - # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. - - if state is None: - raise ValueError('State should not be None.') - - final_message = '' - messages = [] - for event in state.history.get_events(reverse=True): - # will this be a MessageAction? 
- # TODO we can filter for types of events if we know what to expect - messages.append(event.content) - if str(event.content) in ["'A'", "'B'", "'C'"]: - final_message = event.content - break + # NOTE: You can actually set slightly different instruction for different agents + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] - final_message = final_message.strip("'") - logger.info( - f'Predicted answer: {final_message}, Ground truth: {instance["answer"]}' - ) + # use a session id for concurrent evaluation + sid = instance['instance_id'] + + runtime = await create_runtime(config, sid=sid) + await initialize_runtime(runtime, instance) - test_result = get_test_result( - model_answer=final_message, ground_truth=instance['answer'] + # Here's how you can run the agent (similar to the `main` function) and get the final task state + state: State | None = asyncio.run( + run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( + metadata.agent_class + ), ) - metrics = state.metrics.get() if state.metrics else None - - # history is now available as a stream of events, rather than list of pairs of (Action, Observation) - # for compatibility with the existing output format, we can remake the pairs here - # remove when it becomes unnecessary - histories = state.history.compatibility_for_eval_history_pairs() - - # Save the output - output = { - 'id': instance['id'], - 'instance': instance, - 'instruction': instruction, - # 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'final_message': final_message, - 'messages': messages, - 'error': state.last_error if state and state.last_error else None, - 'test_result': test_result, - } - except Exception: - logger.error('Process instance failed') - raise - finally: - config.workspace_mount_path = old_workspace_mount_path - config.workspace_base = old_workspace_base - - # Close the sandbox - sandbox.close() + ) + 
# ======= Attempt to evaluate the agent's edits ======= + # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) + # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. + + if state is None: + raise ValueError('State should not be None.') + + final_message = '' + for event in state.history.get_events(reverse=True): + if isinstance(event, AgentFinishAction): + final_message = event.thought + break + elif isinstance(event, MessageAction): + final_message = event.content + break + + final_message = final_message.strip("'") + logger.info( + f'Predicted answer: {final_message}, Ground truth: {instance["answer"]}' + ) + test_result = get_test_result( + model_answer=final_message, ground_truth=instance['answer'] + ) + test_result['final_message'] = final_message + + metrics = state.metrics.get() if state.metrics else None + # history is now available as a stream of events, rather than list of pairs of (Action, Observation) + # for compatibility with the existing output format, we can remake the pairs here + # remove when it becomes unnecessary + histories = state.history.compatibility_for_eval_history_pairs() + + # Save the output + output = EvalOutput( + instance_id=instance['instance_id'], + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result=test_result, + ) return output @@ -254,7 +271,7 @@ def process_instance( '--dataset', type=str, help='the logic reasoning dataset to evaluate on {ProntoQA, ProofWriter}', - default='ProntoQA', + default='ProofWriter', ) parser.add_argument( '--data_split', @@ -262,36 +279,32 @@ def process_instance( help='data split to evaluate on {validation}', # right now we only support validation split default='validation', ) - args, _ = parser.parse_known_args() - if args.directory: - config.workspace_base = 
os.path.abspath(args.directory) - print(f'Setting workspace base to {config.workspace_base}') dataset_name = args.dataset data_split = args.data_split dataset = load_dataset(f'renma/{dataset_name}') - logic_reasoning_tests = dataset[data_split] + dataset_df = dataset[data_split].to_pandas() + dataset_df.rename(columns={'id': 'instance_id'}, inplace=True) - id_column = 'id' - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, - args.dataset_name, + dataset_name, args.agent_cls, args.max_iterations, args.eval_note, args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column) - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(dataset_df, output_file, args.eval_n_limit) + asyncio.run( + run_evaluation( + instances, metadata, output_file, args.eval_num_workers, process_instance + ) ) diff --git a/evaluation/logic_reasoning/scripts/run_infer.sh b/evaluation/logic_reasoning/scripts/run_infer.sh old mode 100644 new mode 100755 index b7cec5cd546d..21fb7a1ae757 --- a/evaluation/logic_reasoning/scripts/run_infer.sh +++ b/evaluation/logic_reasoning/scripts/run_infer.sh @@ -3,8 +3,8 @@ set -eo pipefail source "evaluation/utils/version_control.sh" -DATASET=$1 -MODEL_CONFIG=$2 +MODEL_CONFIG=$1 +DATASET=$2 COMMIT_HASH=$3 EVAL_LIMIT=$4 AGENT=$5 @@ -23,6 +23,11 @@ if [ -z "$AGENT" ]; then AGENT="CodeActAgent" fi +if [ -z "$DATASET" ]; then + echo "Dataset not specified, use default ProofWriter" + DATASET="ProofWriter" +fi + get_agent_version echo "AGENT: $AGENT" 
diff --git a/evaluation/miniwob/Dockerfile b/evaluation/miniwob/Dockerfile new file mode 100644 index 000000000000..b7d191ac6786 --- /dev/null +++ b/evaluation/miniwob/Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:22.04 + +RUN apt-get update && apt-get install -y python3 python3-pip git + +RUN git clone https://github.com/Farama-Foundation/miniwob-plusplus.git /miniwob-plusplus && \ + git -C "/miniwob-plusplus" reset --hard 7fd85d71a4b60325c6585396ec4f48377d049838 + +ENV MINIWOB_URL="file:///miniwob-plusplus/miniwob/html/miniwob/" + +# docker build -t xingyaoww/od-eval-miniwob . diff --git a/evaluation/miniwob/README.md b/evaluation/miniwob/README.md index 7c26872a8d58..4426ebff7018 100644 --- a/evaluation/miniwob/README.md +++ b/evaluation/miniwob/README.md @@ -2,52 +2,9 @@ This folder contains evaluation for [MiniWoB++](https://miniwob.farama.org/) benchmark, powered by [BrowserGym](https://github.com/ServiceNow/BrowserGym) for easy evaluation of how well an agent capable of browsing can perform on synthetic web browsing tasks. -## Setup OpenDevin Environment +## Setup Environment and LLM Configuration -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local develop environment for OpenDevin. - -## Configure OpenDevin and your LLM - -Create a `config.toml` file if it does not exist at the root of the workspace. 
- -Add the following configurations: - -```toml -[core] -max_iterations = 100 -cache_dir = "/tmp/cache" -ssh_hostname = "localhost" - -[sandbox] -box_type = "ssh" -timeout = 120 - -# TODO: Change these to the model you want to evaluate -[llm.eval_gpt4_1106_preview] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[llm.eval_some_openai_compatible_model] -model = "openai/MODEL_NAME" -base_url = "https://OPENAI_COMPATIBLE_URL/v1" -api_key = "XXX" -temperature = 0.0 -``` - -## Setup MiniWoB++ Environment and Environment Variables of MiniWoB++ -MiniWoB++ requires you to set up websites containing a static website that is accessible via URL to the machine running the OpenDevin agents. - -- Clone miniwob (use a specific frozen commit for reproducibility) -```sh -git clone git@github.com:Farama-Foundation/miniwob-plusplus.git -git -C "./miniwob-plusplus" reset --hard 7fd85d71a4b60325c6585396ec4f48377d049838 -``` - -- Setup Miniwob URL (change `PATH_TO_MINIWOB_CLONED_REPO` here to the absolute path to your `miniwob-plusplus` folder) in `evaluation/miniwob/scripts/run_infer.sh` -```sh -export MINIWOB_URL="file:///miniwob/html/miniwob/" -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Test if your environment works @@ -56,7 +13,7 @@ Access with browser the above MiniWoB URLs and see if they load correctly. 
## Run Evaluation ```sh -bash evaluation/miniwob/scripts/run_infer.sh +./evaluation/miniwob/scripts/run_infer.sh llm.claude-35-sonnet-eval ``` Results will be in `evaluation/evaluation_outputs/outputs/miniwob/` diff --git a/evaluation/miniwob/run_infer.py b/evaluation/miniwob/run_infer.py index 9218ac56d9bb..92c96bf42572 100644 --- a/evaluation/miniwob/run_infer.py +++ b/evaluation/miniwob/run_infer.py @@ -1,7 +1,7 @@ import asyncio import json -import logging import os +from typing import Any import browsergym.miniwob # noqa F401 register miniwob tasks as gym environments import gymnasium as gym @@ -9,84 +9,131 @@ from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, load_app_config, parse_arguments -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM -from opendevin.runtime.docker.ssh_box import DockerSSHBox -from opendevin.runtime.tools import RuntimeTool - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import ( + BrowseInteractiveAction, + CmdRunAction, + MessageAction, +) +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.browser.browser_env import ( + BROWSER_EVAL_GET_GOAL_ACTION, + BROWSER_EVAL_GET_REWARDS_ACTION, +) +from opendevin.runtime.runtime import Runtime SUPPORTED_AGENT_CLS = {'BrowsingAgent'} -docker_ssh_box: DockerSSHBox | None = None - -def get_sandbox(): - global docker_ssh_box - if docker_ssh_box is None: - docker_ssh_box = 
DockerSSHBox() - return docker_ssh_box +def get_config( + metadata: EvalMetadata, + env_id: str, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='xingyaoww/od-eval-miniwob:v1.0', + enable_auto_lint=True, + use_host_network=False, + browsergym_eval_env=env_id, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime( + runtime: Runtime, +) -> str: + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. + """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + # Set instance id + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = BrowseInteractiveAction(browser_actions=BROWSER_EVAL_GET_GOAL_ACTION) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + goal = obs.content + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + return goal + + +async def complete_runtime( + runtime: Runtime, +) -> dict[str, Any]: + """Complete the runtime for the agent. + + This function is called before the runtime is used to run the agent. + If you need to do something in the sandbox to get the correctness metric after + the agent has run, modify this function. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Completion Fn {'-' * 50}") + obs: CmdOutputObservation + + action = BrowseInteractiveAction(browser_actions=BROWSER_EVAL_GET_REWARDS_ACTION) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + + logger.info(f"{'-' * 50} END Runtime Completion Fn {'-' * 50}") + return { + 'rewards': json.loads(obs.content), + } -def process_instance( +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, -): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) +) -> EvalOutput: env_id = instance.id + config = get_config(metadata, env_id) + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, 'logs', f'instance_{env_id}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {env_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, env_id, log_dir) else: logger.info(f'Starting evaluation for instance {env_id}.') - # Here's how you can run the agent (similar to the `main` function) and get the final task state - runtime_tools_config = { - RuntimeTool.BROWSER: { - 'browsergym_eval': env_id, - 
'browsergym_eval_save_dir': metadata.eval_output_dir, - } - } + runtime = await create_runtime(config, sid=env_id) + task_str = await initialize_runtime(runtime) state: State | None = asyncio.run( - run_agent_controller( - agent, - 'PLACEHOLDER_GOAL', - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - runtime_tools_config=runtime_tools_config, - sandbox=get_sandbox(), - sid=env_id, + run_controller( + config=config, + task_str=task_str, # take output from initialize_runtime + runtime=runtime, ) ) @@ -99,18 +146,17 @@ def process_instance( raise ValueError('State should not be None.') metrics = state.metrics.get() if state.metrics else None - browsergym_eval_dir = os.path.join(metadata.eval_output_dir, env_id.split('/')[1]) - # read goal - with open( - os.path.join(browsergym_eval_dir, 'goal.txt'), 'r', encoding='utf-8' - ) as f: - instruction = f.read() - # read reward - with open( - os.path.join(browsergym_eval_dir, 'rewards.json'), 'r', encoding='utf-8' - ) as f: - rewards = json.load(f) - reward = max(rewards) + + # Instruction is the first message from the USER + instruction = '' + for event in state.history.get_events(): + if isinstance(event, MessageAction): + instruction = event.content + break + + return_val = await complete_runtime(runtime) + logger.info(f'Return value from complete_runtime: {return_val}') + reward = max(return_val['rewards']) # history is now available as a stream of events, rather than list of pairs of (Action, Observation) # for compatibility with the existing output format, we can remake the pairs here @@ -118,16 +164,17 @@ def process_instance( histories = state.history.compatibility_for_eval_history_pairs() # Save the output - output = { - 'instance_id': env_id, - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': reward, - } - + output = 
EvalOutput( + instance_id=env_id, + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result={ + 'reward': reward, + }, + ) return output @@ -136,7 +183,7 @@ def process_instance( dataset = pd.DataFrame( { - 'id': [ + 'instance_id': [ id for id in gym.envs.registry.keys() if id.startswith('browsergym/miniwob') @@ -144,26 +191,25 @@ def process_instance( } ) - id_column = 'id' - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, - args.dataset_name, + 'miniwob', args.agent_cls, args.max_iterations, args.eval_note, args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column) - _ = get_sandbox() # Initialize the sandbox - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(dataset, output_file, args.eval_n_limit) + + asyncio.run( + run_evaluation( + instances, metadata, output_file, args.eval_num_workers, process_instance + ) ) diff --git a/evaluation/miniwob/scripts/run_infer.sh b/evaluation/miniwob/scripts/run_infer.sh old mode 100644 new mode 100755 index e6131b8c98a2..0fe525ba7958 --- a/evaluation/miniwob/scripts/run_infer.sh +++ b/evaluation/miniwob/scripts/run_infer.sh @@ -3,14 +3,10 @@ set -eo pipefail source "evaluation/utils/version_control.sh" -# configure miniwob website, change URL to yours -export MINIWOB_URL="file:///home/fangzhex/miniwob-plusplus/miniwob/html/miniwob/" - # configure browsing agent export USE_NAV="false" export 
USE_CONCISE_ANSWER="true" - MODEL_CONFIG=$1 COMMIT_HASH=$2 AGENT=$3 @@ -42,7 +38,7 @@ COMMAND="poetry run python evaluation/miniwob/run_infer.py \ --llm-config $MODEL_CONFIG \ --max-iterations 10 \ --max-chars 10000000 \ - --eval-num-workers $NUM_WORKERS \ + --eval-num-workers $NUM_WORKERS" if [ -n "$EVAL_LIMIT" ]; then echo "EVAL_LIMIT: $EVAL_LIMIT" diff --git a/evaluation/mint/Dockerfile b/evaluation/mint/Dockerfile new file mode 100644 index 000000000000..af7366763dd0 --- /dev/null +++ b/evaluation/mint/Dockerfile @@ -0,0 +1,10 @@ +FROM ubuntu:22.04 + +RUN apt-get update && apt-get install -y python3 python3-pip git gcc + +WORKDIR /root + +COPY requirements.txt . +RUN pip install -r requirements.txt + +# docker build -t xingyaoww/od-eval-mint:v1.0 . diff --git a/evaluation/mint/README.md b/evaluation/mint/README.md index 1e07bd643153..3925cf5e407f 100644 --- a/evaluation/mint/README.md +++ b/evaluation/mint/README.md @@ -2,9 +2,11 @@ This folder contains the evaluation harness for the [MINT benchmark](https://arxiv.org/abs/2309.10691) on LLMs' ability to solve tasks with multi-turn interactions. -## Configure OpenDevin and LM +We support evaluation of the [Eurus subset focus on math and code reasoning](https://arxiv.org/abs/2404.02078), including MATH, MMLU, TheoremQA, HumanEval, MBPP. -Create a `config.toml` file if it does not exist at the root of the workspace. Please check [README.md](../../README.md) for how to set this up. +## Setup Environment and LLM Configuration + +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. 
## Start the evaluation diff --git a/evaluation/mint/run_infer.py b/evaluation/mint/run_infer.py index b1d4c0826880..42ea45901456 100644 --- a/evaluation/mint/run_infer.py +++ b/evaluation/mint/run_infer.py @@ -1,33 +1,36 @@ -import asyncio import functools -import logging import os -import pathlib from typing import Any, Dict +import pandas as pd from datasets import load_dataset -from evaluation.swe_bench.swe_env_box import DockerSSHBox +from evaluation.mint.datatypes import TaskState +from evaluation.mint.env import SimplifiedEnv +from evaluation.mint.prompts import ToolPromptTemplate +from evaluation.mint.tasks import Task from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM - -from .datatypes import TaskState -from .env import SimplifiedEnv -from .prompts import ToolPromptTemplate -from .tasks import Task - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import ( + CmdRunAction, +) +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime def codeact_user_response_mint(state: State, task: Task, task_config: Dict[str, int]): @@ -42,7 +45,7 @@ def codeact_user_response_mint(state: State, task: Task, task_config: Dict[str, last_action = state.history.get_last_action() result_state: TaskState = env.step(last_action.message or '') - state.task_state = 
result_state + state.extra_data['task_state'] = result_state if not result_state.latest_output: # Task is finished @@ -62,77 +65,107 @@ def codeact_user_response_mint(state: State, task: Task, task_config: Dict[str, 'CodeActAgent': '\nIMPORTANT: When your answer is confirmed by the user to be correct, you can exit using the following command: exit .\n' } +with open(os.path.join(os.path.dirname(__file__), 'requirements.txt'), 'r') as f: + MINT_DEPENDENCIES = f.read().splitlines() + + +def load_incontext_example(task_name: str, with_tool: bool = True): + assert with_tool, 'NOT with_tool is not supported yet' + subset = { + 'gsm8k': 'reasoning', + 'math': 'reasoning', + 'mmlu': 'reasoning', + 'theoremqa': 'reasoning', + 'mbpp': 'mbpp', + 'humaneval': 'humaneval', + }[task_name] + with open( + os.path.join( + os.path.dirname(__file__), + 'tasks', + 'in_context_examples', + subset, + 'with_tool.txt', + ), + 'r', + ) as f: + return f.read() + + +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='xingyaoww/od-eval-mint:v1.0', + enable_auto_lint=True, + use_host_network=False, + od_runtime_extra_deps=f'$OD_INTERPRETER_PATH -m pip install {" ".join(MINT_DEPENDENCIES)}', + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime(runtime: Runtime): + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + # Set instance id + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 -def process_instance( + action = CmdRunAction(command='cd /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + + +async def process_instance( instance: Any, metadata: EvalMetadata, reset_logger: bool = True, ): - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(metadata.llm_config)) - workspace_mount_path = os.path.join(config.workspace_mount_path, '_eval_workspace') - # create process-specific workspace dir - workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) + config = get_config(metadata) # Setup the logger properly, so you can run multi-processing to parallelize the evaluation if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, 'logs', f'instance_{instance.task_id}.log' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance.task_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - - logger.info(f'Process-specific workspace mounted at 
{workspace_mount_path}') - - # use a session id for concurrent processing - sid = instance.task_id + '_' + str(os.getpid()) - sandbox = DockerSSHBox(sid=sid) - - requirements_host_src = 'evaluation/mint/requirements.txt' - requirements_sandbox_dest = '/opendevin/plugins/mint/requirements.txt' - sandbox.copy_to( - host_src=requirements_host_src, - sandbox_dest=requirements_sandbox_dest, - recursive=False, - ) - logger.info( - f'Copied files from [{requirements_host_src}] to [{requirements_sandbox_dest}] inside sandbox.' - ) - exit_code, output = sandbox.execute(f'pip install -r {requirements_sandbox_dest}') + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir) + else: + logger.info(f'Starting evaluation for instance {instance.instance_id}.') # Prepare instruction assert metadata.details is not None instruction = ToolPromptTemplate(use_tool=True)( max_total_steps=metadata.max_iterations, max_propose_solution=metadata.details['max_propose_solution'], - in_context_example=instance.in_context_example( - use_tool=True, with_feedback=False - ), + in_context_example=instance.in_context_example, task_prompt='Task:\n' + instance.prompt, ) instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you or provide the concise RESULT inside tag AND NEVER ASK FOR HUMAN HELP.\n' # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] # Here's how you can run the agent (similar to the `main` function) and get the final task state fake_user_response_fn = functools.partial( - AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[agent.__class__.__name__], + AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[metadata.agent_class], task=instance, task_config={ 'max_iterations': metadata.max_iterations, @@ -140,24 +173,22 @@ def process_instance( }, ) - 
state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=fake_user_response_fn, - sandbox=sandbox, - sid=sid, - ) + runtime = await create_runtime(config, sid=instance.instance_id) + await initialize_runtime(runtime) + + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=fake_user_response_fn, ) if state is None: raise ValueError('State should not be None.') task_state = None - if hasattr(state, 'task_state'): - task_state = state.task_state + if 'task_state' in state.extra_data: + task_state = state.extra_data['task_state'] logger.info('Task state: ' + str(task_state.to_dict())) metrics = state.metrics.get() if state.metrics else None @@ -168,30 +199,37 @@ def process_instance( histories = state.history.compatibility_for_eval_history_pairs() # Save the output - output = { - 'id': instance.task_id, - 'instance': instance.to_dict(), - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': task_state.success if task_state else False, - } - - # Close the sandbox - sandbox.close() - + output = EvalOutput( + instance_id=instance.instance_id, + instance=instance.to_dict(), + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result={ + 'success': task_state.success if task_state else False, + }, + ) return output if __name__ == '__main__': parser = get_parser() + SUBSETS = [ + # Eurus subset: https://arxiv.org/abs/2404.02078 + 'math', + # 'gsm8k', + 'mmlu', + 'theoremqa', + 'mbpp', + 'humaneval', + ] parser.add_argument( '--subset', - default='math', - choices=['math', 'gsm8k', 'mmlu', 'theoremqa', 'mbpp', 
'humaneval'], + default='all', + choices=SUBSETS + ['all'], type=str, help='subset of the dataset to be used', ) @@ -206,19 +244,36 @@ def process_instance( # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing # so we don't need to manage file uploading to OpenDevin's repo - mint_dataset = load_dataset( - 'ryanhoangt/xingyaoww-mint-bench', name=args.subset, split='test' - ) - logger.info(f'Evaluating MINT - {args.subset} subset') - mint_tests = mint_dataset.to_pandas() + if args.subset == 'all': + subsets = SUBSETS + else: + subsets = [args.subset] - id_column = 'id' - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + dataset_dfs = [] + for subset in subsets: + in_context_example = load_incontext_example(subset) + _cur_dataset = load_dataset( + 'ryanhoangt/xingyaoww-mint-bench', name=subset, split='test' + ) + logger.info(f'Loaded MINT - {subset} subset') + _df = _cur_dataset.to_pandas().rename(columns={'id': 'instance_id'}) + _df['instance_id'] = _df['instance_id'].apply(lambda x: f'{subset}/{x}') # noqa + _df['in_context_example'] = in_context_example + dataset_dfs.append(_df) + logger.info(f'Loaded {len(_df)} instances for subset: {subset}') + + dataset_df = pd.concat(dataset_dfs) + logger.info(f'Loaded {len(dataset_df)} instances for subset: {subsets}') + + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, - args.dataset_name, + f'MINT-{args.subset}', args.agent_cls, args.max_iterations, args.eval_note, @@ -226,12 +281,7 @@ def process_instance( details={'max_propose_solution': args.max_propose_solution}, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(mint_dataset, output_file, args.eval_n_limit, 
id_column) + instances = prepare_dataset(dataset_df, output_file, args.eval_n_limit) run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances, metadata, output_file, args.eval_num_workers, process_instance ) diff --git a/evaluation/mint/scripts/run_infer.sh b/evaluation/mint/scripts/run_infer.sh old mode 100644 new mode 100755 index 8c8e017aa876..b9ec6d7a7a85 --- a/evaluation/mint/scripts/run_infer.sh +++ b/evaluation/mint/scripts/run_infer.sh @@ -29,15 +29,16 @@ COMMAND="poetry run python ./evaluation/mint/run_infer.py \ --llm-config $MODEL_CONFIG \ --max-iterations 5 \ --max-propose-solution 2 \ - --eval-num-workers $NUM_WORKERS \ + --eval-num-workers $NUM_WORKERS +" if [ -n "$SUBSET" ]; then echo "SUBSET: $SUBSET" COMMAND="$COMMAND --subset $SUBSET" # otherwise default to use the math subset else - echo "SUBSET: math" - COMMAND="$COMMAND --subset math" + echo "SUBSET: all" + COMMAND="$COMMAND --subset all" fi if [ -n "$EVAL_LIMIT" ]; then diff --git a/evaluation/ml_bench/README.md b/evaluation/ml_bench/README.md index 51e59cad7975..e6199327a7d9 100644 --- a/evaluation/ml_bench/README.md +++ b/evaluation/ml_bench/README.md @@ -10,40 +10,9 @@ The task introduces new challenges for LLMs, such as comprehending long and lang For more details on the ML-Bench task and dataset, please refer to the paper: [ML-Bench: Evaluating Large Language Models for Code Generation in Repository-Level Machine Learning Tasks](https://arxiv.org/abs/2311.09835). -## Setup Environment +## Setup Environment and LLM Configuration -Please follow the [OpenDevin setup guide](https://github.com/OpenDevin/OpenDevin/blob/main/docs/setup.md) to set up the local development environment for OpenDevin. - -## Configure OpenDevin and your LLM - -Create a `config.toml` file if it does not exist at the root of the workspace. 
- -Add the following configurations: - -```toml -[core] -max_iterations = 100 -cache_dir = "/tmp/cache" -ssh_hostname = "localhost" -run_as_devin = false -sandbox_container_image = "public.ecr.aws/i5g0m1f6/ml-bench" # Use the latest image from the ML-Bench repository - -[sandbox] -enable_auto_lint = true - - -# TODO: Change these to the model you want to evaluate -[llm.eval_gpt4_1106_preview] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[llm.eval_some_openai_compatible_model] -model = "openai/MODEL_NAME" -base_url = "https://OPENAI_COMPATIBLE_URL/v1" -api_key = "XXX" -temperature = 0.0 -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Run Inference on ML-Bench diff --git a/evaluation/ml_bench/run_infer.py b/evaluation/ml_bench/run_infer.py index 9be043933177..c7baa77e03a6 100644 --- a/evaluation/ml_bench/run_infer.py +++ b/evaluation/ml_bench/run_infer.py @@ -13,29 +13,34 @@ - Clean up the code and docker image used for evaluation. 
""" -import asyncio -import logging import os -import pathlib from typing import Any +import pandas as pd from datasets import load_dataset from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, codeact_user_response, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, + load_app_config, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM -from opendevin.runtime.docker.ssh_box import DockerSSHBox +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import CmdRunAction +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime config = load_app_config() @@ -66,161 +71,203 @@ } -def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool = True): - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - old_workspace_mount_path = config.workspace_mount_path - old_workspace_base = config.workspace_base - try: - workspace_mount_path = os.path.join( - config.workspace_mount_path, '_eval_workspace' - ) - # create process-specific workspace dir - # so that different agent don't interfere with each other. 
- workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - - # reset workspace to config - config.workspace_base = workspace_mount_path - config.workspace_mount_path = workspace_mount_path - - # Setup the logger properly, so you can run multi-processing to parallelize the evaluation - if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, - 'logs', - f"instance_{instance['id']}_pid_{os.getpid()}.log", - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f"Starting evaluation for instance {instance['id']}.\nLOG: tail -f {log_file}" - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') - - # Create a sandbox, using the instance ID and PID as the session ID to avoid conflicts - sid = str(instance['id']) + '_' + str(os.getpid()) - sandbox = DockerSSHBox(sid=sid) - - # Set up the task environment - sandbox.execute(f'conda activate {ID2CONDA[instance["github_id"]]}') - - # Clone the task repo into the sandbox - repo_url = instance['github'] - repo_name = repo_url.split('/')[-1] - sandbox.execute(f'git clone {repo_url} /workspace/{repo_name}') - sandbox.execute(f'chmod -R 777 /workspace/{repo_name}') - - # Navigate to the task's code path - task_path = os.path.join('/workspace', repo_name, instance['path'][2:]) - sandbox.execute(f'cd {task_path}') - - # Prepare the task instruction - instruction = ( - f'Please complete the Machine Learning task in the 
following repository: {repo_name}\n\n' - f'The task is: {instance["task"]}\n\n' - f'{instance["instruction"]}\n\n' - 'You should create a script named `run.sh` under the specified path in the repo to run the task.\n\n' - f'You can find the task repo at: {task_path}\n\n' - + ( - 'Here is the prefix code for the task:\n' - '```bash\n' - f'{instance["prefix_code"]}\n' - '```\n\n' - if instance['prefix_code'] - else '' - ) - + 'You should terminate the subprocess after running the task (e.g., call subprocess.Popen(args).wait()).' - ) - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] - - # Run the agent - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( - agent.__class__.__name__ - ), - sandbox=sandbox, - sid=sid, - ) +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='public.ecr.aws/i5g0m1f6/ml-bench', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required +): + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + # Set instance id + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + # Set up the task environment + action = CmdRunAction(command=f'conda activate {ID2CONDA[instance["github_id"]]}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + repo_url = instance['github'] + repo_name = repo_url.split('/')[-1] + action = CmdRunAction(command=f'git clone {repo_url} /workspace/{repo_name}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = CmdRunAction(command=f'chmod -R 777 /workspace/{repo_name}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + # Navigate to the task's code path + task_path = os.path.join('/workspace', repo_name, instance['path'][2:]) + action = CmdRunAction(command=f'cd {task_path}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + + +async def complete_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required, but it is used to get the workspace_dir_name +) -> dict[str, Any]: + """Complete the runtime for the agent. + + This function is called before the runtime is used to run the agent. + If you need to do something in the sandbox to get the correctness metric after + the agent has run, modify this function. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Completion Fn {'-' * 50}") + obs: CmdOutputObservation + + repo_url = instance['github'] + repo_name = repo_url.split('/')[-1] + task_path = os.path.join('/workspace', repo_name, instance['path'][2:]) + + # Evaluate the agent's script + eval_script = os.path.join(task_path, 'run.sh') + logger.info(f'Running evaluation script: {eval_script}') + + action = CmdRunAction(command=f'cat {eval_script}', keep_prompt=False) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + if obs.exit_code == 0: + eval_script_content = obs.content + else: + logger.error(f'Error reading evaluation script: {obs.content}') + eval_script_content = '' + + action = CmdRunAction( + command=f'timeout 120s conda run -n {ID2CONDA[instance["github_id"]]} bash {eval_script}', + timeout=600, + ) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + if obs.exit_code == 0: + eval_output = obs.content + else: + logger.error(f'Error running evaluation script: {obs.content}') + eval_output = '' + + outputs = { + 'eval_script_content': eval_script_content, + 'eval_output': eval_output, + } + if obs.exit_code != 0 and obs.exit_code != 124: + logger.warning(f'Evaluation script failed with exit code {obs.exit_code}') + logger.warning(f'Output: {eval_output}') + outputs['success'] = int( + 'KeyboardInterrupt' in eval_output + ) # super-dainiu: assume ``KeyboardInterrupt`` is a success as is done in ML-Bench + + else: + logger.info(f'Evaluation script succeeded with exit code {obs.exit_code}') + logger.info(f'Output: {eval_output}') + outputs['success'] = 1 + outputs['eval_exit_code'] = obs.exit_code + + logger.info(f"{'-' * 50} END Runtime Completion Fn {'-' * 50}") + return outputs + + +async def process_instance( + instance: Any, metadata: EvalMetadata, reset_logger: bool = True +): + config = get_config(metadata) + + # Setup the logger properly, so you can run multi-processing to 
parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance['instance_id'], log_dir) + else: + logger.info(f'Starting evaluation for instance {instance["instance_id"]}.') + + # Create a sandbox, using the instance ID and PID as the session ID to avoid conflicts + sid = str(instance['instance_id']) + + repo_url = instance['github'] + repo_name = repo_url.split('/')[-1] + task_path = os.path.join('/workspace', repo_name, instance['path'][2:]) + # Prepare the task instruction + instruction = ( + f'Please complete the Machine Learning task in the following repository: {repo_name}\n\n' + f'{instance["instruction"]}\n\n' + 'You should create a script named `run.sh` under the specified path in the repo to run the task.\n\n' + f'You can find the task repo at: {task_path}\n\n' + + ( + 'Here is the prefix code for the task:\n' + '```bash\n' + f'{instance["prefix_code"]}\n' + '```\n\n' + if instance['prefix_code'] + else '' ) - assert state is not None - metrics = state.metrics.get() if state.metrics else {} - - # Evaluate the agent's script - eval_script = os.path.join(task_path, 'run.sh') - logger.info(f'Running evaluation script: {eval_script}') - - try: - _, eval_script_content = sandbox.execute(f'cat {eval_script}') - except Exception as e: - logger.error(f'Error reading evaluation script: {e}') - eval_script_content = '' - - try: - exit_code, eval_output = sandbox.execute( - f'timeout 120s conda run -n {ID2CONDA[instance["github_id"]]} bash {eval_script}', - timeout=600, - ) - except Exception as e: - logger.error(f'Error running evaluation script: {e}') - exit_code = -1 - eval_output = '' - - if exit_code != 0 and exit_code != 124: - logger.warning(f'Evaluation script failed with exit code {exit_code}') - logger.warning(f'Output: {eval_output}') - metrics['success'] = int( - 'KeyboardInterrupt' in eval_output - ) # super-dainiu: assume ``KeyboardInterrupt`` is a 
success as is done in ML-Bench - else: - logger.info(f'Evaluation script succeeded with exit code {exit_code}') - logger.info(f'Output: {eval_output}') - metrics['success'] = 1 - - # history is now available as a stream of events, rather than list of pairs of (Action, Observation) - # for compatibility with the existing output format, we can remake the pairs here - # remove when it becomes unnecessary - histories = state.history.compatibility_for_eval_history_pairs() - - # Save the output - output = { - 'instance_id': instance['id'], - 'repo': repo_url, - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'eval_script': eval_script_content, - 'eval_exit_code': exit_code, - 'eval_output': eval_output, - 'metrics': metrics, - } - - except Exception as e: - logger.error(f'Error processing instance {instance["id"]}: {e}') - raise - finally: - config.workspace_mount_path = old_workspace_mount_path - config.workspace_base = old_workspace_base - - # Shutdown the sandbox - sandbox.close() + + 'You should terminate the subprocess after running the task (e.g., call subprocess.Popen(args).wait()).' 
+ ) + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] + + runtime = await create_runtime(config, sid=sid) + await initialize_runtime(runtime, instance) + + # Run the agent + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN.get( + metadata.agent_class + ), + ) + assert state is not None + metrics = state.metrics.get() if state.metrics else {} + + test_result = await complete_runtime(runtime) + + # history is now available as a stream of events, rather than list of pairs of (Action, Observation) + # for compatibility with the existing output format, we can remake the pairs here + # remove when it becomes unnecessary + histories = state.history.compatibility_for_eval_history_pairs() + + # Save the output + output = EvalOutput( + instance_id=instance['instance_id'], + instance=instance.to_dict(), + instruction=instruction, + metadata=metadata, + history=histories, + test_result=test_result, + metrics=metrics, + ) return output @@ -238,30 +285,26 @@ def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool = data_split = args.eval_split - # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing - # so we don't need to manage file uploading to OpenDevin's repo ml_bench = load_dataset('super-dainiu/ml-bench', split=data_split).to_pandas() + ml_bench.rename(columns={'id': 'instance_id'}, inplace=True) - id_column = 'instance_id' - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = make_metadata( llm_config, - args.dataset_name, + f'ml-bench-{data_split}', args.agent_cls, args.max_iterations, 
args.eval_note, args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(ml_bench, output_file, args.eval_n_limit, id_column) + instances = prepare_dataset(ml_bench, output_file, args.eval_n_limit) run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances, metadata, output_file, args.eval_num_workers, process_instance ) diff --git a/evaluation/ml_bench/scripts/run_infer.sh b/evaluation/ml_bench/scripts/run_infer.sh old mode 100644 new mode 100755 diff --git a/evaluation/swe_bench/README.md b/evaluation/swe_bench/README.md index 39713ecfc975..146069ed4ea5 100644 --- a/evaluation/swe_bench/README.md +++ b/evaluation/swe_bench/README.md @@ -1,132 +1,79 @@ # SWE-Bench Evaluation with OpenDevin SWE-Bench Docker Image -This folder contains the evaluation harness that we built on top of the original [SWE-Bench benchmark](https://www.swebench.com/) ([paper](https://arxiv.org/abs/2310.06770)). We created [a fork of SWE-Bench](https://github.com/OpenDevin/OD-SWE-bench.git) mostly built on top of [the original repo](https://github.com/princeton-nlp/SWE-bench) and [containerized](#opendevin-swe-bench-docker-image) it for easy evaluation. +This folder contains the evaluation harness that we built on top of the original [SWE-Bench benchmark](https://www.swebench.com/) ([paper](https://arxiv.org/abs/2310.06770)). **UPDATE (7/1/2024): We now support the official SWE-Bench dockerized evaluation as announced [here](https://github.com/princeton-nlp/SWE-bench/blob/main/docs/20240627_docker/README.md).** -## Setup Environment +The evaluation consists of three steps: -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to set up a local development environment for OpenDevin. +1. 
Environment setup: [install python environment](../README.md#development-environment), [configure LLM config](../README.md#configure-opendevin-and-your-llm), and [pull docker](#opendevin-swe-bench-instance-level-docker-support). +2. [Run inference](#run-inference-on-swe-bench-instances): Generate a edit patch for each Github issue +3. [Evaluate patches using SWE-Bench docker](#evaluate-generated-patches) -## OpenDevin SWE-Bench Docker Image +## Setup Environment and LLM Configuration -In [OpenDevin-SWE-Bench fork](https://github.com/OpenDevin/OD-SWE-bench.git) (mostly from [original repo](https://github.com/princeton-nlp/SWE-bench) with some fixes), we try to pre-build the **testbed** (i.e., code of the repository we want the agent to edit) AND the **conda environment**, so that in evaluation (inference) time, we can directly leverage existing environments for efficient evaluation. +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. -**We pack everything you need for SWE-Bench inference into one, gigantic, docker image.** To use it: +## OpenDevin SWE-Bench Instance-level Docker Support -```bash -docker pull ghcr.io/opendevin/eval-swe-bench:full-v1.2.1 -``` - -The Docker image contains several important directories: - -- `/swe_util/OD-SWE-bench`: root directory for the OD-SWE-bench repository -- `/swe_util/eval_data`: directory to eval data - - `/swe_util/eval_data/eval_logs/`: evaluation logs - - `/swe_util/eval_data/eval_temp/`: temporary folder for the evaluation process - - `/swe_util/eval_data/instances/`: swe-bench raw instances - - `/swe_util/eval_data/outputs/`: model or agent outputs - - `/swe_util/eval_data/testbed_logs/`: logs for testbed building - - `/swe_util/eval_data/testbeds/`: directory for all testbeds -- `/swe_util/miniforge3/`: directory for miniforge3 - -To reproduce how we pack the image, check [this doc](./BUILD_TESTBED_AND_ENV.md). 
+OpenDevin now support using the [official evaluation docker](https://github.com/princeton-nlp/SWE-bench/blob/main/docs/20240627_docker/README.md) for both **[inference](#run-inference-on-swe-bench-instances) and [evaluation](#evaluate-generated-patches)**. +This is now the default behavior. -NOTE: We only support SWE-Bench lite for now. But modifying our existing scripts for full SWE-Bench should be quite straightforward. +### Download Docker Images -## Configure OpenDevin and your LLM +**(Recommended for reproducibility)** If you have extra local space (e.g., 100GB), you can try pull the [instance-level docker images](https://github.com/princeton-nlp/SWE-bench/blob/main/docs/20240627_docker/README.md#choosing-the-right-cache_level) we've prepared by running: -Create a `config.toml` file if it does not exist at the root of the workspace. - -Add the following configurations: - -```toml -[core] -max_iterations = 100 -cache_dir = "/tmp/cache" -ssh_hostname = "localhost" - -[sandbox] -box_type = "ssh" -timeout = 120 - -run_as_devin = false -max_budget_per_task = 4 # 4 USD - -[sandbox] -# SWEBench eval specific -use_host_network = false -enable_auto_lint = true - -# TODO: Change these to the model you want to evaluate -[llm.eval_gpt4_1106_preview_llm] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[llm.eval_some_openai_compatible_model_llm] -model = "openai/MODEL_NAME" -base_url = "https://OPENAI_COMPATIBLE_URL/v1" -api_key = "XXX" -temperature = 0.0 +```bash +evaluation/swe_bench/scripts/docker/pull_all_eval_docker.sh instance ``` -## Test if your environment works - -Make sure your Docker daemon is running, and you have pulled the `eval-swe-bench:full-v1.2` -docker image. 
Then run this python script: +If you want to save disk space a bit (e.g., with ~50GB free disk space), while speeding up the image pre-build process, you can pull the environment-level docker images: ```bash -# export USE_INSTANCE_IMAGE=true # if you want to test support for instance-level docker images -poetry run python evaluation/swe_bench/swe_env_box.py +evaluation/swe_bench/scripts/docker/pull_all_eval_docker.sh env ``` -If you get to the interactive shell successfully, it means your environment works! -If you see an error, please make sure your `config.toml` contains all -`SWEBench eval specific` settings as shown in the previous section. - ## Run Inference on SWE-Bench Instances +Make sure your Docker daemon is running, and you have pulled the [instance-level docker image](#opendevin-swe-bench-instance-level-docker-support). + ```bash ./evaluation/swe_bench/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] [max_iter] [num_workers] -# e.g., ./evaluation/swe_bench/scripts/run_infer.sh eval_gpt4_1106_preview_llm HEAD CodeActAgent 300 +# e.g., ./evaluation/swe_bench/scripts/run_infer.sh llm.eval_gpt4_1106_preview HEAD CodeActAgent 300 ``` -where `model_config` is mandatory, while `agent` and `eval_limit` are optional. +where `model_config` is mandatory, and the rest are optional. -`model_config`, e.g. `eval_gpt4_1106_preview`, is the config group name for your +- `model_config`, e.g. `eval_gpt4_1106_preview`, is the config group name for your LLM settings, as defined in your `config.toml`. - -`git-version`, e.g. `HEAD`, is the git commit hash of the OpenDevin version you would +- `git-version`, e.g. `HEAD`, is the git commit hash of the OpenDevin version you would like to evaluate. It could also be a release tag like `0.6.2`. - -`agent`, e.g. `CodeActAgent`, is the name of the agent for benchmarks, defaulting +- `agent`, e.g. `CodeActAgent`, is the name of the agent for benchmarks, defaulting to `CodeActAgent`. - -`eval_limit`, e.g. 
`10`, limits the evaluation to the first `eval_limit` instances. By +- `eval_limit`, e.g. `10`, limits the evaluation to the first `eval_limit` instances. By default, the script evaluates the entire SWE-bench_Lite test set (300 issues). Note: in order to use `eval_limit`, you must also set `agent`. - -`max_iter`, e.g. `20`, is the maximum number of iterations for the agent to run. By +- `max_iter`, e.g. `20`, is the maximum number of iterations for the agent to run. By default, it is set to 30. - -`num_workers`, e.g. `3`, is the number of parallel workers to run the evaluation. By +- `num_workers`, e.g. `3`, is the number of parallel workers to run the evaluation. By default, it is set to 1. There are also two optional environment variables you can set. ``` -export USE_HINT_TEXT=true # if you want to use hint text in the evaluation. Ignore this if you are not sure. -export USE_INSTANCE_IMAGE=true # if you want to use instance-level docker images +export USE_HINT_TEXT=true # if you want to use hint text in the evaluation. Default to false. Ignore this if you are not sure. +export USE_INSTANCE_IMAGE=true # if you want to use instance-level docker images. Default to true ``` -Let's say you'd like to run 10 instances using `eval_gpt4_1106_preview_llm` and CodeActAgent, +Let's say you'd like to run 10 instances using `llm.eval_gpt4_1106_preview` and CodeActAgent, then your command would be: ```bash -./evaluation/swe_bench/scripts/run_infer.sh eval_gpt4_1106_preview_llm HEAD CodeActAgent 10 +./evaluation/swe_bench/scripts/run_infer.sh llm.eval_gpt4_1106_preview HEAD CodeActAgent 10 ``` +### Specify a subset of tasks to run infer + If you would like to specify a list of tasks you'd like to benchmark on, you could create a `config.toml` under `./evaluation/swe_bench/` folder, and put a list attribute named `selected_ids`, e.g. 
@@ -146,22 +93,12 @@ With `output.jsonl` file, you can run `eval_infer.sh` to evaluate generated patc **This evaluation is performed using the official dockerized evaluation announced [here](https://github.com/princeton-nlp/SWE-bench/blob/main/docs/20240627_docker/README.md).** -If you want to evaluate existing results, you should first run this to clone existing outputs - -```bash -git clone https://huggingface.co/spaces/OpenDevin/evaluation evaluation/evaluation_outputs -``` - -If you have extra local space (e.g., 500GB), you can try pull the [instance-level docker images](https://github.com/princeton-nlp/SWE-bench/blob/main/docs/20240627_docker/README.md#choosing-the-right-cache_level) we've prepared to speed up the evaluation by running: +> If you want to evaluate existing results, you should first run this to clone existing outputs +>```bash +>git clone https://huggingface.co/spaces/OpenDevin/evaluation evaluation/evaluation_outputs +>``` -```bash -evaluation/swe_bench/scripts/docker/pull_all_eval_docker.sh instance -``` - -If you want to save disk space a bit (e.g., with ~50GB free disk space), while speeding up the image pre-build process, you can pull the environment-level docker images: -```bash -evaluation/swe_bench/scripts/docker/pull_all_eval_docker.sh env -``` +NOTE, you should have already pulled the instance-level OR env-level docker images following [this section](#opendevin-swe-bench-instance-level-docker-support). 
Then you can run the following: @@ -171,13 +108,13 @@ Then you can run the following: ./evaluation/swe_bench/scripts/eval_infer.sh evaluation/evaluation_outputs/outputs/swe_bench/CodeActAgent/gpt-4-1106-preview_maxiter_50_N_v1.0/output.jsonl ``` -PS: You can also pass in a JSONL with [SWE-Bench format](https://github.com/princeton-nlp/SWE-bench/blob/main/tutorials/evaluation.md#-creating-predictions) to `./evaluation/swe_bench/scripts/eval_infer.sh`, where each line is a JSON of `{"model_patch": "XXX", "model_name_or_path": "YYY", "instance_id": "ZZZ"}`. +> You can also pass in a JSONL with [SWE-Bench format](https://github.com/princeton-nlp/SWE-bench/blob/main/tutorials/evaluation.md#-creating-predictions) to `./evaluation/swe_bench/scripts/eval_infer.sh`, where each line is a JSON of `{"model_patch": "XXX", "model_name_or_path": "YYY", "instance_id": "ZZZ"}`. The final results will be saved to `evaluation/evaluation_outputs/outputs/swe_bench/CodeActAgent/gpt-4-1106-preview_maxiter_50_N_v1.0/` with the following files/directory: - `README.md`: a report showing what are the instances that passed, failed, etc. - `report.json`: a JSON file that contains keys like `"resolved_ids"` pointing to instance IDs that are resolved by the agent. -- `eval_outputs/`: a directory of test logs +- `logs/`: a directory of test logs ## Visualize Results @@ -189,9 +126,10 @@ git clone https://huggingface.co/spaces/OpenDevin/evaluation **(optional) setup streamlit environment with conda**: ```bash +cd evaluation conda create -n streamlit python=3.10 conda activate streamlit -pip install streamlit altair st_pages +pip install -r requirements.txt ``` **run the visualizer**: diff --git a/evaluation/swe_bench/prompt.py b/evaluation/swe_bench/prompt.py new file mode 100644 index 000000000000..6b9da9afb11f --- /dev/null +++ b/evaluation/swe_bench/prompt.py @@ -0,0 +1,28 @@ +CODEACT_SWE_PROMPT = """Now, you're going to solve this issue on your own. 
Your terminal session has started and you're in the repository's root directory. You can use any bash commands or the special interface to help you. Edit all the files you need to and run any checks or tests that you want. +Remember, YOU CAN ONLY ENTER ONE COMMAND AT A TIME. You should always wait for feedback after every command. +When you're satisfied with all of the changes you've made, you can run the following command: exit . +Note however that you cannot use any interactive session commands (e.g. vim) in this environment, but you can write scripts and run them. E.g. you can write a python script and then run it with `python .py`. + +NOTE ABOUT THE EDIT COMMAND: Indentation really matters! When editing a file, make sure to insert appropriate indentation before each line! + +IMPORTANT TIPS: +1. Always start by trying to replicate the bug that the issues discusses. + If the issue includes code for reproducing the bug, we recommend that you re-implement that in your environment, and run it to make sure you can reproduce the bug. + Then start trying to fix it. + When you think you've fixed the bug, re-run the bug reproduction script to make sure that the bug has indeed been fixed. + + If the bug reproduction script does not print anything when it successfully runs, we recommend adding a print("Script completed successfully, no errors.") command at the end of the file, + so that you can be sure that the script indeed ran fine all the way through. + +2. If you run a command and it doesn't work, try running a different command. A command that did not work once will not work the second time unless you modify it! + +3. If you open a file and need to get to an area around a specific line that is not in the first 100 lines, say line 583, don't just use the scroll_down command multiple times. Instead, use the goto 583 command. It's much quicker. + +4. 
If the bug reproduction script requires inputting/reading a specific file, such as buggy-input.png, and you'd like to understand how to input that file, conduct a search in the existing repo code, to see whether someone else has already done that. Do this by running the command: find_file("buggy-input.png") If that doesn't work, use the linux 'find' command. + +5. Always make sure to look at the currently open file and the current working directory (which appears right after the currently open file). The currently open file might be in a different directory than the working directory! Note that some commands, such as 'create', open files, so they might change the current open file. + +6. When editing files, it is easy to accidentally specify a wrong line number or to write code with incorrect indentation. Always check the code after you issue an edit to make sure that it reflects what you wanted to accomplish. If it didn't, issue another command to fix it. + +[Current directory: /workspace/{workspace_dir_name}] +""" diff --git a/evaluation/swe_bench/run_infer.py b/evaluation/swe_bench/run_infer.py index 0578eb588a3b..089b6f040200 100644 --- a/evaluation/swe_bench/run_infer.py +++ b/evaluation/swe_bench/run_infer.py @@ -1,34 +1,39 @@ import asyncio -import logging +import json import os -import pathlib +import tempfile +from typing import Any import pandas as pd import toml -import whatthepatch from datasets import load_dataset import agenthub -from evaluation.swe_bench.swe_env_box import SWEBenchSSHBox +from evaluation.swe_bench.prompt import CODEACT_SWE_PROMPT from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, codeact_user_response, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, load_app_config, parse_arguments -from opendevin.core.logger import 
get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import CmdRunAction +from opendevin.events.observation import CmdOutputObservation, ErrorObservation +from opendevin.runtime.runtime import Runtime -USE_HINT_TEXT = os.environ.get('USE_HINT_TEXT', 'false') == 'true' -USE_INSTANCE_IMAGE = os.environ.get('USE_INSTANCE_IMAGE', 'false') == 'true' +USE_HINT_TEXT = os.environ.get('USE_HINT_TEXT', 'false').lower() == 'true' +USE_INSTANCE_IMAGE = os.environ.get('USE_INSTANCE_IMAGE', 'false').lower() == 'true' AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { 'CodeActAgent': codeact_user_response, @@ -41,183 +46,12 @@ } -def get_test_result(instance, sandbox, workspace_dir_name): - test_result = {'result': {}, 'metadata': {}} - # NOTE: if you need to do something in the sandbox to get the correctness metric, modify this function - try: - test_patch_parsed = whatthepatch.parse_patch(instance.test_patch) - # get a list of filepaths that are involved in the patch - involved_filepaths = set() - for patch in test_patch_parsed: - involved_filepaths.add(patch.header.old_path.removeprefix('a/')) - involved_filepaths.add(patch.header.new_path.removeprefix('b/')) - involved_filepaths = list(involved_filepaths) - test_result['metadata']['1_test_patch_parse_success'] = True - test_result['metadata']['1_test_involved_filepaths'] = involved_filepaths - except Exception as e: - logger.error( - f'Error parsing test patch for instance {instance.instance_id}: {e}' - ) - test_result['metadata']['1_test_patch_parse_success'] = False - test_result['metadata']['1_test_patch_parse_error'] = str(e) - test_result['metadata']['1_test_involved_filepaths'] = None - 
involved_filepaths = [] - - # Try to revert the changes for involved filepaths - err_code, output = sandbox.execute(f'cd /workspace/{workspace_dir_name}') - test_result['metadata']['2_revert_test_involved_filepaths_success'] = [] - for filepath in involved_filepaths: - err_code, output = sandbox.execute( - f'git checkout {instance["base_commit"]} -- {filepath}' - ) - if err_code != 0: - logger.error(f'Error reverting changes for {filepath}: {output}') - test_result['metadata']['2_revert_test_involved_filepaths_success'].append( - False - ) - else: - test_result['metadata']['2_revert_test_involved_filepaths_success'].append( - True - ) - - # Apply the testcase - err_code, output = sandbox.execute('git apply $SWE_TASK_DIR/test.patch') - if err_code != 0: - logger.error(f'Error applying test patch: {output}') - test_result['metadata']['3_apply_test_patch_success'] = False - test_result['metadata']['3_apply_test_patch_error'] = output - else: - test_result['metadata']['3_apply_test_patch_success'] = True - - # Run the test command - err_code, output = sandbox.execute( - '$TEST_CMD > /workspace/$SWE_INSTANCE_ID.log 2>&1' - ) - if err_code != 0: - logger.error(f'Error running test command: {output}') - test_result['metadata']['4_run_test_command_success'] = False - test_result['metadata']['4_run_test_command_error'] = output - else: - test_result['metadata']['4_run_test_command_success'] = True - - # Get the test output - err_code, output = sandbox.execute('cat /workspace/$SWE_INSTANCE_ID.log') - if err_code != 0: - logger.error(f'Error getting test output: {output}') - test_result['metadata']['4_get_test_output_success'] = False - test_result['metadata']['4_get_test_output_error'] = output - else: - test_result['metadata']['4_get_test_output_success'] = True - test_result['test_output'] = output - - # Reformat instance.json - # $SWE_TASK_DIR/instance.json is a dict {"XXX": "YYY"}, add a [ before and a ] after - err_code, output = sandbox.execute( - ( - 'cat 
$SWE_TASK_DIR/instance.json | sed "s/^{/[{/" | sed "s/}$/}]/" > /workspace/instance.json' - ) - ) - if err_code != 0: - logger.error(f'Error creating instance.json: {output}') - test_result['metadata']['5_reformat_instance_json_success'] = False - test_result['metadata']['5_reformat_instance_json_error'] = output - else: - test_result['metadata']['5_reformat_instance_json_success'] = True +def _get_swebench_workspace_dir_name(instance: pd.Series) -> str: + return f'{instance.repo}__{instance.version}'.replace('/', '__') - if USE_INSTANCE_IMAGE: - # instance report is not supported in instance image mode - test_result['metadata']['6_get_instance_report_success'] = False - test_result['metadata']['6_get_instance_report_error'] = ( - 'Instance report is not supported in instance image mode.' - ) - - else: - # Get the instance report - err_code, output = sandbox.execute( - ( - 'cd /swe_util/OD-SWE-bench ' - '&& export PYTHONPATH=$(pwd):$PYTHONPATH ' - '&& conda run -n swe-bench-eval python swebench/metrics/get_instance_report.py --swe_bench_task /workspace/instance.json --log_path /workspace/$SWE_INSTANCE_ID.log' - ) - ) - if err_code != 0: - logger.error(f'Error getting instance report: {output}') - test_result['metadata']['6_get_instance_report_success'] = False - test_result['metadata']['6_get_instance_report_error'] = output - else: - test_result['metadata']['6_get_instance_report_success'] = True - test_result['result_raw'] = output - - # try to parse output - for line in output.strip().split('\n'): - line = line.strip('-') - try: - key, value = line.split(':') - except ValueError: - # skip this line - print(f'Error parsing result line: {line}') - continue - value = value.strip() - try: - value = int(value) - except ValueError: - pass - test_result['result'][key.strip()] = value - return test_result - - -def process_instance( - instance: pd.Series, - metadata: EvalMetadata, - reset_logger: bool = True, -): - # Create the agent - agent = 
Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - - workspace_mount_path = os.path.join(config.workspace_mount_path, '_eval_workspace') - # create process-specific workspace dir - workspace_mount_path = os.path.join(workspace_mount_path, str(os.getpid())) - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) - - # Setup the logger properly, so you can run multi-processing to parallelize the evaluation - if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, - 'infer_logs', - f'instance_{instance.instance_id}.log', - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {instance.instance_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - os.makedirs(os.path.dirname(log_file), exist_ok=True) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - else: - logger.info(f'Starting evaluation for instance {instance.instance_id}.') - - # NOTE: this is something special we do for SWE-Bench due to the reason described in the previous section - # You can omit this if you don't need to setup specialized sandbox - workspace_dir_name = f'{instance.repo}__{instance.version}'.replace('/', '__') - sandbox = SWEBenchSSHBox.get_box_for_instance( - instance, - workspace_dir_name, - workspace_mount_path=workspace_mount_path, - sandbox_plugins=agenthub.Agent.get_cls(metadata.agent_class).sandbox_plugins, - use_instance_image=USE_INSTANCE_IMAGE, - ) +def get_instruction(instance: pd.Series, metadata: EvalMetadata): + workspace_dir_name = 
_get_swebench_workspace_dir_name(instance) # Prepare instruction if metadata.agent_class == 'CodeActSWEAgent': instruction = ( @@ -226,39 +60,11 @@ def process_instance( f'{instance.problem_statement}\n' '--- END ISSUE ---\n\n' ) - if USE_HINT_TEXT and instance.hints_text: instruction += ( f'--- BEGIN HINTS ---\n{instance.hints_text}\n--- END HINTS ---\n' ) - instruction += f"""Now, you're going to solve this issue on your own. Your terminal session has started and you're in the repository's root directory. You can use any bash commands or the special interface to help you. Edit all the files you need to and run any checks or tests that you want. -Remember, YOU CAN ONLY ENTER ONE COMMAND AT A TIME. You should always wait for feedback after every command. -When you're satisfied with all of the changes you've made, you can run the following command: exit . -Note however that you cannot use any interactive session commands (e.g. vim) in this environment, but you can write scripts and run them. E.g. you can write a python script and then run it with `python .py`. - -NOTE ABOUT THE EDIT COMMAND: Indentation really matters! When editing a file, make sure to insert appropriate indentation before each line! - -IMPORTANT TIPS: -1. Always start by trying to replicate the bug that the issues discusses. - If the issue includes code for reproducing the bug, we recommend that you re-implement that in your environment, and run it to make sure you can reproduce the bug. - Then start trying to fix it. - When you think you've fixed the bug, re-run the bug reproduction script to make sure that the bug has indeed been fixed. - - If the bug reproduction script does not print anything when it successfully runs, we recommend adding a print("Script completed successfully, no errors.") command at the end of the file, - so that you can be sure that the script indeed ran fine all the way through. - -2. If you run a command and it doesn't work, try running a different command. 
A command that did not work once will not work the second time unless you modify it! - -3. If you open a file and need to get to an area around a specific line that is not in the first 100 lines, say line 583, don't just use the scroll_down command multiple times. Instead, use the goto 583 command. It's much quicker. - -4. If the bug reproduction script requires inputting/reading a specific file, such as buggy-input.png, and you'd like to understand how to input that file, conduct a search in the existing repo code, to see whether someone else has already done that. Do this by running the command: find_file("buggy-input.png") If that doesn't work, use the linux 'find' command. - -5. Always make sure to look at the currently open file and the current working directory (which appears right after the currently open file). The currently open file might be in a different directory than the working directory! Note that some commands, such as 'create', open files, so they might change the current open file. - -6. When editing files, it is easy to accidentally specify a wrong line number or to write code with incorrect indentation. Always check the code after you issue an edit to make sure that it reflects what you wanted to accomplish. If it didn't, issue another command to fix it. 
- -[Current directory: /workspace/{workspace_dir_name}] -""" + instruction += CODEACT_SWE_PROMPT.format(workspace_dir_name=workspace_dir_name) else: # Testing general agents instruction = ( @@ -276,61 +82,278 @@ def process_instance( ) # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] + return instruction - # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[ - agent.__class__.__name__ - ], - sandbox=sandbox, - sid=instance.instance_id, + +def get_config( + instance: pd.Series, + metadata: EvalMetadata, +) -> AppConfig: + SWE_BENCH_CONTAINER_IMAGE = 'ghcr.io/opendevin/eval-swe-bench:full-v1.2.1' + if USE_INSTANCE_IMAGE: + # We use a different instance image for the each instance of swe-bench eval + container_image = 'sweb.eval.x86_64.' + instance['instance_id'] + else: + container_image = SWE_BENCH_CONTAINER_IMAGE + + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_budget_per_task=4, + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image=container_image, + enable_auto_lint=True, + use_host_network=False, + # large enough timeout, since some testcases take very long to run + timeout=300, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required +): + """Initialize the runtime for the agent. 
+ + This function is called before the runtime is used to run the agent. + """ + logger.info('-' * 30) + logger.info('BEGIN Runtime Initialization Fn') + logger.info('-' * 30) + workspace_dir_name = _get_swebench_workspace_dir_name(instance) + obs: CmdOutputObservation + + # Set instance id + action = CmdRunAction( + command=f"""echo 'export SWE_INSTANCE_ID={instance['instance_id']}' >> ~/.bashrc && echo 'export PIP_CACHE_DIR=~/.cache/pip' >> ~/.bashrc && echo "alias git='git --no-pager'" >> ~/.bashrc""" + ) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + if USE_INSTANCE_IMAGE: + # inject the init script + script_dir = os.path.dirname(__file__) + + # inject the instance info + action = CmdRunAction(command='mkdir -p /swe_util/eval_data/instances') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert ( + obs.exit_code == 0 + ), f'Failed to create /swe_util/eval_data/instances: {obs.content}' + + swe_instance_json_name = 'swe-bench-instance.json' + with tempfile.TemporaryDirectory() as temp_dir: + # Construct the full path for the desired file name within the temporary directory + temp_file_path = os.path.join(temp_dir, swe_instance_json_name) + # Write to the file with the desired name within the temporary directory + with open(temp_file_path, 'w') as f: + if not isinstance(instance, dict): + json.dump([instance.to_dict()], f) + else: + json.dump([instance], f) + + # Copy the file to the desired location + await runtime.copy_to(temp_file_path, '/swe_util/eval_data/instances/') + + # inject the instance swe entry + await runtime.copy_to( + str(os.path.join(script_dir, 'scripts/setup/instance_swe_entry.sh')), + '/swe_util/', + ) + action = CmdRunAction(command='cat ~/.bashrc') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = 
await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + action = CmdRunAction(command='source ~/.bashrc') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + action = CmdRunAction(command='source /swe_util/instance_swe_entry.sh') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + else: + action = CmdRunAction(command='source /swe_util/swe_entry.sh') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert ( + obs.exit_code == 0 + ), f'Failed to source /swe_util/swe_entry.sh: {obs.content}' + + action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + action = CmdRunAction(command='git reset --hard') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + action = CmdRunAction( + command='for remote_name in $(git remote); do git remote remove "${remote_name}"; done' + ) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + logger.info('-' * 30) + logger.info('END Runtime Initialization Fn') + logger.info('-' * 30) + + +async def complete_runtime( + runtime: Runtime, + instance: pd.Series, # this argument is not required, but it is used to get the workspace_dir_name +) -> dict[str, Any]: + """Complete the runtime for the agent. 
+ + This function is called before the runtime is used to run the agent. + If you need to do something in the sandbox to get the correctness metric after + the agent has run, modify this function. + """ + logger.info('-' * 30) + logger.info('BEGIN Runtime Completion Fn') + logger.info('-' * 30) + obs: CmdOutputObservation + workspace_dir_name = _get_swebench_workspace_dir_name(instance) + + action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + action = CmdRunAction(command='git config --global core.pager ""') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + action = CmdRunAction(command='git add -A') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + assert obs.exit_code == 0 + + n_retries = 0 + git_patch = None + while n_retries < 5: + action = CmdRunAction( + command=f'git diff --no-color --cached {instance["base_commit"]}', + keep_prompt=False, ) + action.timeout = 600 + 100 * n_retries + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + n_retries += 1 + if isinstance(obs, CmdOutputObservation): + if obs.exit_code == 0: + git_patch = obs.content.strip() + break + else: + logger.info('Failed to get git diff, retrying...') + await asyncio.sleep(10) + elif isinstance(obs, ErrorObservation): + logger.error(f'Error occurred: {obs.content}. 
Retrying...') + await asyncio.sleep(10) + else: + raise ValueError(f'Unexpected observation type: {type(obs)}') + + logger.info('-' * 30) + logger.info('END Runtime Completion Fn') + logger.info('-' * 30) + return {'git_patch': git_patch} + + +async def process_instance( + instance: pd.Series, + metadata: EvalMetadata, + reset_logger: bool = True, +) -> EvalOutput: + config = get_config(instance, metadata) + + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation + if reset_logger: + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir) + else: + logger.info(f'Starting evaluation for instance {instance.instance_id}.') + + runtime = await create_runtime(config, sid=instance.instance_id) + await initialize_runtime(runtime, instance) + + instruction = get_instruction(instance, metadata) + + # Here's how you can run the agent (similar to the `main` function) and get the final task state + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[metadata.agent_class], ) # ======= THIS IS SWE-Bench specific ======= # Get git patch - git_patch = sandbox.get_diff_patch() - logger.info(f'Got git diff for instance {instance.instance_id}') + return_val = await complete_runtime(runtime, instance) + git_patch = return_val['git_patch'] + logger.info( + f'Got git diff for instance {instance.instance_id}:\n--------\n{git_patch}\n--------' + ) # ========================================== # ======= Attempt to evaluate the agent's edits ======= - # TODO: if you need to do something in the sandbox to get the correctness metric, modify this function - test_result = get_test_result(instance, sandbox, workspace_dir_name) + # we use eval_infer.sh to evaluate the agent's edits, not here + # because the agent may alter the environment / testcases + test_result = { + 
'git_patch': git_patch, + } # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation. - if state is None: raise ValueError('State should not be None.') - metrics = state.metrics.get() if state.metrics else None - # history is now available as a stream of events, rather than list of pairs of (Action, Observation) # for compatibility with the existing output format, we can remake the pairs here # remove when it becomes unnecessary histories = state.history.compatibility_for_eval_history_pairs() + metrics = state.metrics.get() if state.metrics else None # Save the output - output = { - 'instance_id': instance.instance_id, - 'swe_instance': instance.to_dict(), # SWE Bench specific - 'instruction': instruction, - 'git_patch': git_patch, # SWE Bench specific - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': test_result, - } - - # Close the sandbox - sandbox.close() + output = EvalOutput( + instance_id=instance.instance_id, + instruction=instruction, + instance=instance.to_dict(), # SWE Bench specific + test_result=test_result, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + ) return output @@ -358,11 +381,12 @@ def filter_dataset(dataset: pd.DataFrame, filter_column: str) -> pd.DataFrame: dataset = load_dataset('princeton-nlp/SWE-bench_Lite') swe_bench_tests = filter_dataset(dataset['test'].to_pandas(), 'instance_id') - id_column = 'instance_id' - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - if args.llm_config and llm_config is None: - raise ValueError(f'Could not find LLM config {args.llm_config}') - logger.info(f'Config for evaluation: {config}') + llm_config = None + 
if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') details = {} _agent_cls = agenthub.Agent.get_cls(args.agent_cls) @@ -382,14 +406,10 @@ def filter_dataset(dataset: pd.DataFrame, filter_column: str) -> pd.DataFrame: ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset( - swe_bench_tests, output_file, args.eval_n_limit, id_column - ) - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(swe_bench_tests, output_file, args.eval_n_limit) + + asyncio.run( + run_evaluation( + instances, metadata, output_file, args.eval_num_workers, process_instance + ) ) diff --git a/evaluation/swe_bench/scripts/eval/convert_od_output_to_swe_json.py b/evaluation/swe_bench/scripts/eval/convert_od_output_to_swe_json.py index 49f167357c41..41d4fe6dae58 100644 --- a/evaluation/swe_bench/scripts/eval/convert_od_output_to_swe_json.py +++ b/evaluation/swe_bench/scripts/eval/convert_od_output_to_swe_json.py @@ -45,9 +45,16 @@ def process_git_patch(patch): def convert_row_to_swebench_format(row): + if 'git_patch' in row: + model_patch = row['git_patch'] + elif 'test_result' in row and 'git_patch' in row['test_result']: + model_patch = row['test_result']['git_patch'] + else: + raise ValueError(f'Row {row} does not have a git_patch') + return { 'instance_id': row['instance_id'], - 'model_patch': process_git_patch(row['git_patch']), + 'model_patch': process_git_patch(model_patch), 'model_name_or_path': model_name, } diff --git a/evaluation/swe_bench/scripts/run_infer.sh b/evaluation/swe_bench/scripts/run_infer.sh index 65a76376c9c6..a31e2c311f60 100755 --- a/evaluation/swe_bench/scripts/run_infer.sh +++ b/evaluation/swe_bench/scripts/run_infer.sh @@ -27,8 +27,8 @@ if [ -z "$MAX_ITER" ]; then fi if [ -z "$USE_INSTANCE_IMAGE" ]; then - 
echo "USE_INSTANCE_IMAGE not specified, use default false" - USE_INSTANCE_IMAGE=false + echo "USE_INSTANCE_IMAGE not specified, use default true" + USE_INSTANCE_IMAGE=true fi export USE_INSTANCE_IMAGE=$USE_INSTANCE_IMAGE diff --git a/evaluation/swe_bench/scripts/setup/instance_swe_entry.sh b/evaluation/swe_bench/scripts/setup/instance_swe_entry.sh index c0b6ceb5cd4f..1d4051fd8404 100644 --- a/evaluation/swe_bench/scripts/setup/instance_swe_entry.sh +++ b/evaluation/swe_bench/scripts/setup/instance_swe_entry.sh @@ -45,7 +45,11 @@ echo "$item" | jq -r '.patch' > $SWE_TASK_DIR/gold.patch echo "$item" | jq 'del(.test_patch, .patch)' > $SWE_TASK_DIR/instance.json # Clear the workspace -rm -rf /workspace/* +if [ -d /workspace ]; then + rm -rf /workspace/* +else + mkdir /workspace +fi # Copy repo to workspace if [ -d /workspace/$WORKSPACE_NAME ]; then rm -rf /workspace/$WORKSPACE_NAME @@ -61,7 +65,7 @@ mkdir -p $SWE_TASK_DIR/reset_testbed_log_dir REPO_PATH=/workspace/$WORKSPACE_NAME echo "Repo Path: $REPO_PATH" -echo "Test Command: $TEST_CMD" +# echo "Test Command: $TEST_CMD" echo "export REPO_PATH=\"$REPO_PATH\"" >> ~/.bashrc # echo "export TEST_CMD=\"$TEST_CMD\"" >> ~/.bashrc diff --git a/evaluation/swe_bench/swe_env_box.py b/evaluation/swe_bench/swe_env_box.py deleted file mode 100644 index 47427c683615..000000000000 --- a/evaluation/swe_bench/swe_env_box.py +++ /dev/null @@ -1,313 +0,0 @@ -import json -import os -import sys -import tempfile -import uuid - -from datasets import load_dataset -from swebench.harness.constants import MAP_REPO_TO_TEST_FRAMEWORK -from swebench.harness.utils import get_test_directives - -from opendevin.core.config import AppConfig, SandboxConfig, load_app_config -from opendevin.core.logger import opendevin_logger as logger -from opendevin.runtime.docker.ssh_box import DockerSSHBox -from opendevin.runtime.plugins import ( - AgentSkillsRequirement, - JupyterRequirement, - PluginRequirement, -) - -SWE_BENCH_CONTAINER_IMAGE = 
'ghcr.io/opendevin/eval-swe-bench:full-v1.2.1' - - -def get_image_name_from_instance_id(instance_id: str) -> str: - return 'sweb.eval.x86_64.' + instance_id - - -class SWEBenchSSHBox(DockerSSHBox): - def __init__( - self, - config: AppConfig, - container_image: str, - timeout: int = 120, - sid: str | None = None, - swe_instance_id: str | None = None, - swe_instance: dict | None = None, - skip_workspace_mount: bool = True, - sandbox_plugins: list[PluginRequirement] = [], # noqa: B006 - workspace_dir_name: str | None = None, - use_instance_image: bool = False, - ): - if swe_instance_id is None: - raise ValueError('swe_instance_id must be provided!') - self.swe_instance_id = swe_instance_id - self.swe_instance = swe_instance - self.skip_workspace_mount = skip_workspace_mount - self.workspace_dir_name = workspace_dir_name - - assert ( - container_image is not None - ), 'container_image is required for SWEBenchSSHBox!' - # Need to run as root to use SWEBench container - sid = f'swe_bench_{swe_instance_id}_' + str(uuid.uuid4()) - logger.info(f'===Using container image: {container_image}') - super().__init__( - config=SandboxConfig(container_image=container_image, timeout=timeout), - persist_sandbox=config.persist_sandbox, - workspace_mount_path=config.workspace_mount_path, - sandbox_workspace_dir=config.workspace_mount_path_in_sandbox, - cache_dir=config.cache_dir, - run_as_devin=config.run_as_devin, - ssh_hostname=config.ssh_hostname, - ssh_password=config.ssh_password, - ssh_port=config.ssh_port, - sid=sid, - ) - self.init_plugins(sandbox_plugins) - - exit_code, output = self.execute('mv ~/.bashrc ~/.bashrc.bak') - assert exit_code == 0, f'Failed to backup ~/.bashrc: {output}' - - exit_code, output = self.execute( - f"echo 'export SWE_INSTANCE_ID={self.swe_instance_id}' >> ~/.bashrc && echo 'export PIP_CACHE_DIR=~/.cache/pip' >> ~/.bashrc && echo \"alias git='git --no-pager'\" >> ~/.bashrc" - ) - assert exit_code == 0, f'Failed to set SWE_INSTANCE_ID in ~/.bashrc: 
{output}' - - logger.info('Sourcing swe_entry.sh to set up environment variables') - logger.info( - 'Initialization of SWEBench may take approximately 10 minutes due to long-running installations, such as those requiring compilation.' - ) - logger.info(f'Use instance image: {use_instance_image}') - if use_instance_image: - # we directly inject the instance info into the container and the init script - script_dir = os.path.dirname(__file__) - - # inject test command - test_type = MAP_REPO_TO_TEST_FRAMEWORK[swe_instance['repo']][ - swe_instance['version'] - ] - swe_instance['test_directives'] = get_test_directives(swe_instance) - swe_instance['test_cmd'] = ( - f"{test_type} {' '.join(swe_instance['test_directives'])}" - ) - exit_code, output = self.execute( - f"""echo "export TEST_CMD='{swe_instance["test_cmd"]}'" >> ~/.bashrc""" - ) - # assert exit_code == 0, f'Failed to set TEST_CMD in ~/.bashrc: {output}' - - # inject the instance info - self.execute('mkdir -p /swe_util/eval_data/instances') - swe_instance_json_name = 'swe-bench-instance.json' - with tempfile.TemporaryDirectory() as temp_dir: - # Construct the full path for the desired file name within the temporary directory - temp_file_path = os.path.join(temp_dir, swe_instance_json_name) - # Write to the file with the desired name within the temporary directory - with open(temp_file_path, 'w') as f: - if not isinstance(swe_instance, dict): - json.dump([swe_instance.to_dict()], f) - else: - json.dump([swe_instance], f) - - # Copy the file to the desired location - self.copy_to(temp_file_path, '/swe_util/eval_data/instances/') - - # inject the init script - self.copy_to( - str(os.path.join(script_dir, 'scripts/setup/instance_swe_entry.sh')), - '/swe_util/', - ) - self.execute('cat ~/.bashrc') - self.execute('source ~/.bashrc') - - self.execute('source /swe_util/instance_swe_entry.sh', timeout=600) - logger.info('exit code: %d', exit_code) - logger.info(output) - assert exit_code == 0, f'Failed to source 
swe_entry.sh: {output}' - logger.info('Sourced swe_entry.sh successfully') - else: - exit_code, output = self.execute( - 'source /swe_util/swe_entry.sh', timeout=600 - ) - logger.info('exit code: %d', exit_code) - logger.info(output) - assert exit_code == 0, f'Failed to source swe_entry.sh: {output}' - logger.info('Sourced swe_entry.sh successfully') - - @property - def volumes(self): - if self.skip_workspace_mount: - return { - k: v - for k, v in super().volumes.items() - if not v['bind'] == self.sandbox_workspace_dir - } - return super().volumes - - @classmethod - def get_box_for_instance( - cls, - instance, - config: AppConfig, - workspace_dir_name=None, - skip_workspace_mount: bool = True, - workspace_mount_path: str | None = None, - sandbox_plugins: list[PluginRequirement] = [], # noqa: B006 - use_instance_image: bool = False, - ) -> 'SWEBenchSSHBox': - if workspace_dir_name is None: - workspace_dir_name = f"{instance['repo']}__{instance['version']}".replace( - '/', '__' - ) - old_workspace_base = config.workspace_base - old_workspace_mount_path = config.workspace_mount_path - - try: - config.workspace_base = workspace_mount_path - config.workspace_mount_path = workspace_mount_path - - # linting python after editing helps LLM fix indentations - config.sandbox.enable_auto_lint = True - # Need to run as root to use SWEBench container - config.run_as_devin = False - if use_instance_image: - container_image = get_image_name_from_instance_id( - instance['instance_id'] - ) - else: - container_image = SWE_BENCH_CONTAINER_IMAGE - sandbox = cls( - container_image=container_image, - config=config, - swe_instance_id=instance['instance_id'], - swe_instance=instance, - skip_workspace_mount=skip_workspace_mount, - sandbox_plugins=sandbox_plugins, - workspace_dir_name=workspace_dir_name, - use_instance_image=use_instance_image, - ) - logger.info(f"SSH box started for instance {instance['instance_id']}.") - - # cd to the repo - exit_code, output = sandbox.execute(f'cd 
/workspace/{workspace_dir_name}') - if exit_code != 0: - logger.error(f'Failed to cd to the repo: {output}') - sys.exit(1) - - # remove all future commits & remote following Devin - # https://www.cognition-labs.com/post/swe-bench-technical-report - exit_code, output = sandbox.execute('git reset --hard') - if exit_code != 0: - logger.error(f'Failed to reset the repo: {output}') - sys.exit(1) - exit_code, output = sandbox.execute( - 'for remote_name in $(git remote); do git remote remove "${remote_name}"; done' - ) - if exit_code != 0: - logger.error(f'Failed to remove remote: {output}') - sys.exit(1) - except Exception: - raise - finally: - # restore workspace_base and workspace_mount_path - config.workspace_base = old_workspace_base - config.workspace_mount_path = old_workspace_mount_path - return sandbox - - def get_diff_patch(self): - # add everything to the index - exit_code, output = self.execute(f'cd /workspace/{self.workspace_dir_name}') - if exit_code != 0: - logger.error('Failed to cd to the repo') - return '' - - exit_code, _output = self.execute('git config --global core.pager ""') - if exit_code != 0: - logger.error('Failed to change git config') - return '' - - # add everything to the index - exit_code, output = self.execute('git add -A') - if exit_code != 0: - logger.error('Failed to add everything to the index') - return '' - - # get the git diff - exit_code, git_patch = self.execute( - f'git diff --no-color --cached {self.swe_instance["base_commit"]}' - ) - if exit_code != 0: - logger.error('Failed to get git diff') - return '' - return git_patch - - -if __name__ == '__main__': - config = load_app_config() - - # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing - # so we don't need to manage file uploading to OpenDevin's repo - dataset = load_dataset('princeton-nlp/SWE-bench_Lite') - swe_bench_tests = dataset['test'].to_pandas() - USE_INSTANCE_IMAGE = os.environ.get('USE_INSTANCE_IMAGE', 'false') == 'true' 
- logger.info(f'USE_INSTANCE_IMAGE: {USE_INSTANCE_IMAGE}') - - # INSTANCE_ID = 'django__django-11099' - INSTANCE_ID = 'astropy__astropy-12907' - swe_bench_tests = swe_bench_tests[swe_bench_tests['instance_id'] == INSTANCE_ID] - EXAMPLE_INSTANCE = swe_bench_tests.iloc[0].to_dict() - - sandbox = SWEBenchSSHBox.get_box_for_instance( - config=config, - instance=EXAMPLE_INSTANCE, - sandbox_plugins=[AgentSkillsRequirement(), JupyterRequirement()], - use_instance_image=USE_INSTANCE_IMAGE, - ) - - # PRE TEST - exit_code, output = sandbox.execute('cd $REPO_PATH') - assert exit_code == 0, 'Failed to cd $REPO_PATH' - logger.info(f'cd $REPO_PATH: {output}') - - # apply test patch - exit_code, output = sandbox.execute('git apply $SWE_TASK_DIR/test.patch') - assert exit_code == 0, 'Failed to apply test patch' - logger.info(f'git apply $SWE_TASK_DIR/test.patch: {output}') - - # TEST - exit_code, output = sandbox.execute('$TEST_CMD') - assert exit_code == 1, 'Expected exit code 1 (since this is a FAIL_TO_PASS)' - logger.info(f'$TEST_CMD:\n{output}') - - # apply gold patch - exit_code, output = sandbox.execute('git apply $SWE_TASK_DIR/gold.patch') - logger.info('exit code: %d', exit_code) - logger.info(f'git apply $SWE_TASK_DIR/gold.patch: {output}') - - # TEST - exit_code, output = sandbox.execute('$TEST_CMD') - assert exit_code == 0, 'Expected exit code 0 (since we applied the gold patch)' - logger.info(f'$TEST_CMD:\n{output}') - - # Reset the repo - exit_code, output = sandbox.execute('git reset --hard') - assert exit_code == 0, 'Failed to reset the repo' - logger.info(f'git reset --hard: {output}') - - sys.stdout.flush() - try: - while True: - try: - user_input = input('>>> ') - except EOFError: - logger.info('Exiting...') - break - if user_input.lower() == 'exit': - logger.info('Exiting...') - break - exit_code, output = sandbox.execute(user_input) - logger.info('exit code: %d', exit_code) - logger.info(output) - sys.stdout.flush() - except KeyboardInterrupt: - 
logger.info('Exiting...') - sandbox.close() diff --git a/evaluation/toolqa/Dockerfile b/evaluation/toolqa/Dockerfile new file mode 100644 index 000000000000..a15b774fcfd5 --- /dev/null +++ b/evaluation/toolqa/Dockerfile @@ -0,0 +1,17 @@ +FROM ubuntu:22.04 + +RUN apt-get update && apt-get install -y python3 python3-pip + +RUN mkdir /workspace +WORKDIR /workspace + + +COPY data/ /workspace/data/ +COPY tools/ /workspace/tools/ + +# TODO: NEED TO FIGURE OUT DEPENDENCIES FOR THESE TOOLS + +# pushd evaluation/toolqa +# mkdir data +# python3 -c "from utils import download_data, download_tools; download_data('/workspace'); download_tools('/workspace')" +# docker build --network host -t xingyaoww/od-eval-toolqa . diff --git a/evaluation/toolqa/README.md b/evaluation/toolqa/README.md index 058ac96beeac..9e9dba391533 100644 --- a/evaluation/toolqa/README.md +++ b/evaluation/toolqa/README.md @@ -2,13 +2,9 @@ This folder contains an evaluation harness we built on top of the original [ToolQA](https://github.com/night-chen/ToolQA) ([paper](https://arxiv.org/pdf/2306.13304)). -## Setup Environment +## Setup Environment and LLM Configuration -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local development environment for OpenDevin. - -## Configure OpenDevin and your LLM - -Run `make setup-config` to set up the `config.toml` file if it does not exist at the root of the workspace. +Please follow the instructions [here](../README.md#setup) to set up your local development environment and LLM. 
## Run Inference on ToolQA Instances diff --git a/evaluation/toolqa/run_infer.py b/evaluation/toolqa/run_infer.py index 7653d35568ff..930f1199e672 100644 --- a/evaluation/toolqa/run_infer.py +++ b/evaluation/toolqa/run_infer.py @@ -1,29 +1,31 @@ import asyncio -import logging import os -import pathlib from typing import Any import pandas as pd +from evaluation.toolqa.utils import encode_question, eval_answer, get_data from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, codeact_user_response, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, get_parser, load_app_config -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + get_parser, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM - -from .utils import download_data, download_tools, encode_question, eval_answer, get_data - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import CmdRunAction +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.runtime import Runtime AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = { 'CodeActAgent': codeact_user_response, @@ -34,59 +36,83 @@ } -def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool = True): - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - # create process-specific workspace dir - # we will create a workspace directory for EACH process - # so that different agent don't interfere with each other. 
- workspace_mount_path = config.workspace_mount_path - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) +def get_config( + metadata: EvalMetadata, +) -> AppConfig: + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=True, + use_host_network=False, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime(runtime: Runtime): + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. + """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + # Set instance id + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = CmdRunAction(command='cd /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + await runtime.add_env_vars({'WOLFRAM_ALPHA_APPID': args.wolfram_alpha_appid}) + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + + +async def process_instance( + instance: Any, metadata: EvalMetadata, reset_logger: bool = True +): + config = get_config(metadata) - # Setup the logger properly, so you can run multi-processing to parallelize the evaluation - eval_output_dir = metadata.eval_output_dir qid = instance.qid question = instance.question answer = instance.answer + + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation if reset_logger: - # Set up logger - log_file = os.path.join(eval_output_dir, 'logs', f'instance_{qid}.log') - # Remove all existing handlers from logger - 
for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {qid}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) - logger.info(f'Process-specific workspace mounted at {workspace_mount_path}') + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, qid, log_dir) + else: + logger.info(f'Starting evaluation for instance {qid}.') # Prepare instruction instruction = encode_question(question) instruction += 'IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.\n' # NOTE: You can actually set slightly different instruction for different agents - instruction += AGENT_CLS_TO_INST_SUFFIX[agent.__class__.__name__] - # logger.info(f'Instruction:\n{instruction}', extra={'msg_type': 'OBSERVATION'}) + instruction += AGENT_CLS_TO_INST_SUFFIX[metadata.agent_class] + logger.info(f'Instruction:\n{instruction}', extra={'msg_type': 'OBSERVATION'}) + + runtime = await create_runtime(config, sid=qid) + await initialize_runtime(runtime) # Here's how you can run the agent (similar to the `main` function) and get the final task state - state: State | None = asyncio.run( - run_agent_controller( - agent, - instruction, - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[ - agent.__class__.__name__ - ], - sid=qid, - ) + state: State | None = await run_controller( + config=config, + task_str=instruction, + runtime=runtime, + 
fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[metadata.agent_class], ) # ======= Attempt to evaluate the agent's edits ======= # If you are working on simpler benchmark that only evaluates the final model output (e.g., in a MessageAction) @@ -110,17 +136,17 @@ def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool = histories = state.history.compatibility_for_eval_history_pairs() # Save the output - output = { - 'qid': qid, - 'text': model_answer_raw, - 'correct': correct, - 'answer_id': 'None', - 'model_id': metadata.model_name, - 'metadata': metadata, - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - } + output = EvalOutput( + instance_id=qid, + test_result={ + 'model_answer_raw': model_answer_raw, + 'correct': correct, + }, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + ) return output @@ -145,8 +171,12 @@ def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool = default='YOUR_WOLFRAMALPHA_APPID', ) args, _ = parser.parse_known_args() - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') dataset = '' hardness = '' @@ -168,14 +198,9 @@ def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool = if args.hardness not in ['easy', 'hard']: raise ValueError('Please choose from easy and hard for hardness.') - # workspace_mount_path = os.path.join(config.workspace_mount_path, '_eval_workspace') - workspace_mount_path = config.workspace_mount_path - pathlib.Path(workspace_mount_path).mkdir(parents=True, exist_ok=True) toolqa_test = pd.DataFrame(get_data(dataset, 
hardness)) - toolqa_data_path = download_data(workspace_mount_path) - toolqa_tool_path = download_tools(workspace_mount_path, args.wolfram_alpha_appid) + toolqa_test.rename(columns={'qid': 'instance_id'}, inplace=True) - id_column = 'qid' metadata = make_metadata( llm_config, f'toolqa-{args.dataset}-{args.hardness}', @@ -184,12 +209,9 @@ def process_instance(instance: Any, metadata: EvalMetadata, reset_logger: bool = args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(toolqa_test, output_file, args.eval_n_limit, id_column) - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(toolqa_test, output_file, args.eval_n_limit) + asyncio.run( + run_evaluation( + instances, metadata, output_file, args.eval_num_workers, process_instance + ) ) diff --git a/evaluation/toolqa/utils.py b/evaluation/toolqa/utils.py index 4155b74381de..c942de5cf87a 100644 --- a/evaluation/toolqa/utils.py +++ b/evaluation/toolqa/utils.py @@ -4,11 +4,12 @@ import string import zipfile -import gdown import requests def download_data(dir): + import gdown + data_path = os.path.join(dir, 'data/external_corpus') if os.path.exists(data_path): return data_path @@ -19,6 +20,7 @@ def download_data(dir): zip_ref.extractall(os.path.join(dir, 'data')) if os.path.exists(zip_path): os.remove(zip_path) + print(f'Data saved to {data_path}') return data_path @@ -42,6 +44,7 @@ def download_tools(dir, wolfram_alpha_appid='YOUR_WOLFRAMALPHA_APPID'): output_file = os.path.join(tool_path, tool.split('/')[1]) with open(output_file, 'wb') as f: f.write(response.content) + print(f'Tool saved to {output_file}') with open(os.path.join(tool_path, 'calculator.py'), 'r') as f: content = f.read() new_content = content.replace('YOUR_WOLFRAMALPHA_APPID', wolfram_alpha_appid) @@ -64,14 +67,29 @@ def download_tools(dir, wolfram_alpha_appid='YOUR_WOLFRAMALPHA_APPID'): 
f.write(new_content) +LOCAL_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') + + def get_data(dataset, hardness): - data = [] - url = f'https://raw.githubusercontent.com/night-chen/ToolQA/main/data/questions/{hardness}/{dataset}-{hardness}.jsonl' - url = requests.get(url) - if url.status_code == 200: - lines = url.text.splitlines() - for line in lines: - data.append(json.loads(line)) + data_path = os.path.join(LOCAL_DATA_DIR, f'{dataset}-{hardness}.jsonl') + if os.path.exists(data_path): + print(f'Loading data from {data_path}') + with open(data_path, 'r') as f: + return json.load(f) + else: + print( + f'Downloading data from https://raw.githubusercontent.com/night-chen/ToolQA/main/data/questions/{hardness}/{dataset}-{hardness}.jsonl' + ) + data = [] + url = f'https://raw.githubusercontent.com/night-chen/ToolQA/main/data/questions/{hardness}/{dataset}-{hardness}.jsonl' + url = requests.get(url) + if url.status_code == 200: + lines = url.text.splitlines() + for line in lines: + data.append(json.loads(line)) + with open(data_path, 'w') as f: + json.dump(data, f) + print(f'Data saved to {data_path}') return data diff --git a/evaluation/utils/shared.py b/evaluation/utils/shared.py index 8b3fe33b9371..771157f155a4 100644 --- a/evaluation/utils/shared.py +++ b/evaluation/utils/shared.py @@ -1,12 +1,13 @@ +import asyncio import json +import logging import multiprocessing as mp import os import pathlib import subprocess import time -from asyncio.log import logger from concurrent.futures import ProcessPoolExecutor -from typing import Any, Callable +from typing import Any, Awaitable, Callable import pandas as pd from pydantic import BaseModel @@ -14,6 +15,8 @@ from opendevin.controller.state.state import State from opendevin.core.config import LLMConfig +from opendevin.core.logger import get_console_handler +from opendevin.core.logger import opendevin_logger as logger from opendevin.events.action import Action from opendevin.events.action.message import 
MessageAction @@ -38,6 +41,31 @@ def model_dump_json(self, *args, **kwargs): return json.dumps(dumped_dict) +class EvalOutput(BaseModel): + # NOTE: User-specified + instance_id: str + instruction: str + # output of the evaluation + # store anything that is needed for the score calculation + test_result: dict[str, Any] + + # Interaction info + metadata: EvalMetadata + history: list[tuple[dict[str, Any], dict[str, Any]]] + metrics: dict[str, Any] + error: str | None = None + + # Optionally save the input test instance + instance: dict[str, Any] | None = None + + def model_dump_json(self, *args, **kwargs): + dumped = super().model_dump_json(*args, **kwargs) + dumped_dict = json.loads(dumped) + # Apply custom serialization for metadata (to avoid leaking sensitive information) + dumped_dict['metadata'] = json.loads(self.metadata.model_dump_json()) + return json.dumps(dumped_dict) + + def codeact_user_response( state: State, encapsulate_solution: bool = False, @@ -136,7 +164,11 @@ def make_metadata( return metadata -def prepare_dataset(dataset: pd.DataFrame, output_file, eval_n_limit, id_column): +def prepare_dataset(dataset: pd.DataFrame, output_file: str, eval_n_limit: int): + assert ( + 'instance_id' in dataset.columns + ), "Expected 'instance_id' column in the dataset. You should define your own unique identifier for each instance and use it as the 'instance_id' column." 
+ id_column = 'instance_id' logger.info(f'Writing evaluation output to {output_file}') finished_ids = set() if os.path.exists(output_file): @@ -164,14 +196,16 @@ def prepare_dataset(dataset: pd.DataFrame, output_file, eval_n_limit, id_column) return pd.DataFrame(new_dataset) -def run_evaluation( +async def run_evaluation( dataset: pd.DataFrame, metadata: EvalMetadata, output_file: str, num_workers: int, - process_instance_func: Callable[[pd.Series, EvalMetadata, bool], Any], - id_column: str, + process_instance_func: Callable[ + [pd.Series, EvalMetadata, bool], Awaitable[EvalOutput] + ], ): + use_multiprocessing = num_workers > 1 logger.info( f'Evaluation started with Agent {metadata.agent_class}, ' f'model {metadata.llm_config.model}, max iterations {metadata.max_iterations}.' @@ -179,35 +213,77 @@ def run_evaluation( pbar = tqdm(total=len(dataset)) output_fp = open(output_file, 'a') - def update_progress(future): + async def update_progress(future): pbar.update(1) - output = future.result() - pbar.set_description(f'Instance {output[id_column]}') - pbar.set_postfix_str(f'Test Result: {output["test_result"]["result"]}') + output: EvalOutput = await future if use_multiprocessing else future + + pbar.set_description(f'Instance {output.instance_id}') + pbar.set_postfix_str(f'Test Result: {output.test_result}') logger.info( - f'Finished evaluation for instance {output[id_column]}: {output["test_result"]["result"]}' + f'Finished evaluation for instance {output.instance_id}: {output.test_result}' ) - output_fp.write(json.dumps(output) + '\n') + output_fp.write(json.dumps(output.model_dump()) + '\n') output_fp.flush() try: - with ProcessPoolExecutor(num_workers) as executor: - futures = [] + if use_multiprocessing: + with ProcessPoolExecutor(num_workers) as executor: + loop = asyncio.get_event_loop() + futures = [] + for _, instance in dataset.iterrows(): + future = loop.run_in_executor( + executor, + process_instance_func, + instance, + metadata, + bool(num_workers > 1), 
+ ) + futures.append(update_progress(future)) + + await asyncio.gather(*futures) + # Use plain for loop for single process for easier debugging + else: + assert num_workers == 1 for _, instance in dataset.iterrows(): - future = executor.submit( - process_instance_func, - instance, - metadata, - bool(num_workers > 1), - ) - future.add_done_callback(update_progress) - futures.append(future) - - for future in futures: - future.result() + output = await process_instance_func(instance, metadata, False) + await update_progress(output) + except KeyboardInterrupt: print('KeyboardInterrupt received. Cleaning up...') cleanup() output_fp.close() logger.info('Evaluation finished.') + + +def reset_logger_for_multiprocessing( + logger: logging.Logger, instance_id: str, log_dir: str +): + """Reset the logger for multiprocessing. + + Save logs to a separate file for each process, instead of trying to write to the + same file/console from multiple processes. + """ + # Set up logger + log_file = os.path.join( + log_dir, + f'instance_{instance_id}.log', + ) + # Remove all existing handlers from logger + for handler in logger.handlers[:]: + logger.removeHandler(handler) + # add back the console handler to print ONE line + logger.addHandler(get_console_handler()) + logger.info( + f'Starting evaluation for instance {instance_id}.\n' + f'Hint: run "tail -f {log_file}" to see live logs in a separate shell' + ) + # Remove all existing handlers from logger + for handler in logger.handlers[:]: + logger.removeHandler(handler) + os.makedirs(os.path.dirname(log_file), exist_ok=True) + file_handler = logging.FileHandler(log_file) + file_handler.setFormatter( + logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') + ) + logger.addHandler(file_handler) diff --git a/evaluation/utils/version_control.sh b/evaluation/utils/version_control.sh index f58f9bdce9f1..2ab2ada48493 100644 --- a/evaluation/utils/version_control.sh +++ b/evaluation/utils/version_control.sh @@ -1,13 +1,11 @@ 
checkout_eval_branch() { if [ -z "$COMMIT_HASH" ]; then echo "Commit hash not specified, use current git commit" - build_sandbox return 0 fi if git diff --quiet $COMMIT_HASH HEAD; then echo "The given hash is equivalent to the current HEAD" - build_sandbox return 0 fi @@ -30,14 +28,8 @@ checkout_eval_branch() { # Trap the EXIT signal to checkout original branch trap checkout_original_branch EXIT - build_sandbox } -build_sandbox() { - echo "Build sandbox locally" - docker build -t eval-sandbox -f containers/sandbox/Dockerfile /tmp - export SANDBOX_CONTAINER_IMAGE="eval-sandbox" -} checkout_original_branch() { if [ -z "$current_branch" ]; then diff --git a/evaluation/webarena/README.md b/evaluation/webarena/README.md index cb720a2ffc9c..2ed3f214bab1 100644 --- a/evaluation/webarena/README.md +++ b/evaluation/webarena/README.md @@ -2,59 +2,14 @@ This folder contains evaluation for [WebArena](https://github.com/web-arena-x/webarena) benchmark, powered by [BrowserGym](https://github.com/ServiceNow/BrowserGym) for easy evaluation of how well an agent capable of browsing can perform on realistic web browsing tasks. -## Setup OpenDevin Environment +## Setup Environment and LLM Configuration -Please follow [this document](https://github.com/OpenDevin/OpenDevin/blob/main/Development.md) to setup local develop environment for OpenDevin. - -## Configure OpenDevin and your LLM - -Create a `config.toml` file if it does not exist at the root of the workspace. 
- -Add the following configurations: - -```toml -[core] -max_iterations = 100 -cache_dir = "/tmp/cache" -ssh_hostname = "localhost" - -[sandbox] -box_type = "ssh" -timeout = 120 - -# TODO: Change these to the model you want to evaluate -[eval_gpt4_1106_preview] -model = "gpt-4-1106-preview" -api_key = "XXX" -temperature = 0.0 - -[eval_some_openai_compatible_model] -model = "openai/MODEL_NAME" -base_url = "https://OPENAI_COMPATIBLE_URL/v1" -api_key = "XXX" -temperature = 0.0 -``` +Please follow instruction [here](../README.md#setup) to setup your local development environment and LLM. ## Setup WebArena Environment WebArena requires you to set up websites containing pre-populated content that is accessible via URL to the machine running the OpenDevin agents. Follow [this document](https://github.com/web-arena-x/webarena/blob/main/environment_docker/README.md) to set up your own WebArena environment through local servers or AWS EC2 instances. -Take note of the base URL of the machine where the environment is installed. - -## Setup Environment Variables of WebArena Websites - -Create a script `webarena_env.sh` under `evaluation/webarena/scripts` with the following: - -```bash -export BASE_URL= -export SHOPPING="$BASE_URL:7770/" -export SHOPPING_ADMIN="$BASE_URL:7780/admin" -export REDDIT="$BASE_URL:9999" -export GITLAB="$BASE_URL:8023" -export WIKIPEDIA="$BASE_URL:8888/wikipedia_en_all_maxi_2022-05/A/User:The_other_Kiwix_guy/Landing" -export MAP="$BASE_URL:3000" -export HOMEPAGE="$BASE_URL:4399" -export OPENAI_API_KEY="yourkey" # this key is required for some WebArena validators that utilize LLMs -``` +Take note of the base URL (`$WEBARENA_BASE_URL`) of the machine where the environment is installed. 
## Test if your environment works @@ -65,7 +20,9 @@ Follow the WebArena environment setup guide carefully, and make sure the URL fie ## Run Evaluation -```sh +```bash +export WEBARENA_BASE_URL= +export OPENAI_API_KEY="yourkey" # this key is required for some WebArena validators that utilize LLMs bash evaluation/webarena/scripts/run_infer.sh ``` diff --git a/evaluation/webarena/run_infer.py b/evaluation/webarena/run_infer.py index 4e6d181566cf..f661a147c93b 100644 --- a/evaluation/webarena/run_infer.py +++ b/evaluation/webarena/run_infer.py @@ -1,7 +1,7 @@ import asyncio import json -import logging import os +from typing import Any import browsergym.webarena # noqa F401 register webarena tasks as gym environments import gymnasium as gym @@ -9,86 +9,146 @@ from evaluation.utils.shared import ( EvalMetadata, + EvalOutput, make_metadata, prepare_dataset, + reset_logger_for_multiprocessing, run_evaluation, ) -from opendevin.controller.agent import Agent from opendevin.controller.state.state import State -from opendevin.core.config import get_llm_config_arg, load_app_config, parse_arguments -from opendevin.core.logger import get_console_handler +from opendevin.core.config import ( + AppConfig, + SandboxConfig, + get_llm_config_arg, + parse_arguments, +) from opendevin.core.logger import opendevin_logger as logger -from opendevin.core.main import run_agent_controller -from opendevin.llm.llm import LLM -from opendevin.runtime.docker.ssh_box import DockerSSHBox -from opendevin.runtime.tools import RuntimeTool - -config = load_app_config() +from opendevin.core.main import create_runtime, run_controller +from opendevin.events.action import ( + BrowseInteractiveAction, + CmdRunAction, + MessageAction, +) +from opendevin.events.observation import CmdOutputObservation +from opendevin.runtime.browser.browser_env import ( + BROWSER_EVAL_GET_GOAL_ACTION, + BROWSER_EVAL_GET_REWARDS_ACTION, +) +from opendevin.runtime.runtime import Runtime SUPPORTED_AGENT_CLS = {'BrowsingAgent'} 
-docker_ssh_box: DockerSSHBox | None = None - - -def get_sandbox(): - global docker_ssh_box - if docker_ssh_box is None: - docker_ssh_box = DockerSSHBox() - return docker_ssh_box +def get_config( + metadata: EvalMetadata, + env_id: str, +) -> AppConfig: + base_url = os.environ.get('WEBARENA_BASE_URL', None) + openai_api_key = os.environ.get('OPENAI_API_KEY', None) + assert base_url is not None, 'WEBARENA_BASE_URL must be set' + assert openai_api_key is not None, 'OPENAI_API_KEY must be set' + + config = AppConfig( + default_agent=metadata.agent_class, + run_as_devin=False, + runtime='eventstream', + max_iterations=metadata.max_iterations, + sandbox=SandboxConfig( + container_image='python:3.11-bookworm', + enable_auto_lint=True, + use_host_network=False, + browsergym_eval_env=env_id, + od_runtime_startup_env_vars={ + 'BASE_URL': base_url, + 'OPENAI_API_KEY': openai_api_key, + 'SHOPPING': f'{base_url}:7770/', + 'SHOPPING_ADMIN': f'{base_url}:7780/admin', + 'REDDIT': f'{base_url}:9999', + 'GITLAB': f'{base_url}:8023', + 'WIKIPEDIA': f'{base_url}:8888/wikipedia_en_all_maxi_2022-05/A/User:The_other_Kiwix_guy/Landing', + 'MAP': f'{base_url}:3000', + 'HOMEPAGE': f'{base_url}:4399', + }, + ), + # do not mount workspace + workspace_base=None, + workspace_mount_path=None, + ) + config.set_llm_config(metadata.llm_config) + return config + + +async def initialize_runtime( + runtime: Runtime, +) -> dict: + """Initialize the runtime for the agent. + + This function is called before the runtime is used to run the agent. 
+ """ + logger.info(f"{'-' * 50} BEGIN Runtime Initialization Fn {'-' * 50}") + obs: CmdOutputObservation + + # Set instance id + action = CmdRunAction(command='mkdir -p /workspace') + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + assert obs.exit_code == 0 + + action = BrowseInteractiveAction(browser_actions=BROWSER_EVAL_GET_GOAL_ACTION) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + goal = obs.content + + logger.info(f"{'-' * 50} END Runtime Initialization Fn {'-' * 50}") + return goal + + +async def complete_runtime( + runtime: Runtime, +) -> dict[str, Any]: + """Complete the runtime for the agent. + + This function is called before the runtime is used to run the agent. + If you need to do something in the sandbox to get the correctness metric after + the agent has run, modify this function. + """ + logger.info(f"{'-' * 50} BEGIN Runtime Completion Fn {'-' * 50}") + obs: CmdOutputObservation + + action = BrowseInteractiveAction(browser_actions=BROWSER_EVAL_GET_REWARDS_ACTION) + logger.info(action, extra={'msg_type': 'ACTION'}) + obs = await runtime.run_action(action) + logger.info(obs, extra={'msg_type': 'OBSERVATION'}) + + logger.info(f"{'-' * 50} END Runtime Completion Fn {'-' * 50}") + return { + 'rewards': json.loads(obs.content), + } -def process_instance( +async def process_instance( instance: pd.Series, metadata: EvalMetadata, reset_logger: bool = True, ): - # Create the agent - agent = Agent.get_cls(metadata.agent_class)(llm=LLM(config=metadata.llm_config)) - env_id = instance.id + env_id = instance.instance_id + config = get_config(metadata, env_id) + # Setup the logger properly, so you can run multi-processing to parallelize the evaluation if reset_logger: - # Set up logger - log_file = os.path.join( - metadata.eval_output_dir, 'logs', f'instance_{env_id}.log' - ) - # Remove all existing handlers from 
logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - # add back the console handler to print ONE line - logger.addHandler(get_console_handler()) - logger.info( - f'Starting evaluation for instance {env_id}.\nHint: run "tail -f {log_file}" to see live logs in a separate shell' - ) - # Remove all existing handlers from logger - for handler in logger.handlers[:]: - logger.removeHandler(handler) - file_handler = logging.FileHandler(log_file) - file_handler.setFormatter( - logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - ) - logger.addHandler(file_handler) + log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs') + reset_logger_for_multiprocessing(logger, env_id, log_dir) else: logger.info(f'Starting evaluation for instance {env_id}.') - # Here's how you can run the agent (similar to the `main` function) and get the final task state - runtime_tools_config = { - RuntimeTool.BROWSER: { - 'browsergym_eval': env_id, - 'browsergym_eval_save_dir': metadata.eval_output_dir, - } - } + runtime = await create_runtime(config, sid=env_id) + task_str = await initialize_runtime(runtime) - state: State | None = asyncio.run( - run_agent_controller( - agent, - 'PLACEHOLDER_GOAL', - max_iterations=metadata.max_iterations, - max_budget_per_task=config.max_budget_per_task, - runtime_tools_config=runtime_tools_config, - sandbox=get_sandbox(), - sid=env_id, - ) + state: State | None = await run_controller( + config=config, + task_str=task_str, + runtime=runtime, ) # ======= Attempt to evaluate the agent's environment impact ======= @@ -100,18 +160,17 @@ def process_instance( raise ValueError('State should not be None.') metrics = state.metrics.get() if state.metrics else None - browsergym_eval_dir = os.path.join(metadata.eval_output_dir, env_id.split('/')[1]) - # read goal - with open( - os.path.join(browsergym_eval_dir, 'goal.txt'), 'r', encoding='utf-8' - ) as f: - instruction = f.read() - # read reward - with open( - 
os.path.join(browsergym_eval_dir, 'rewards.json'), 'r', encoding='utf-8' - ) as f: - rewards = json.load(f) - reward = max(rewards) + + # Instruction is the first message from the USER + instruction = '' + for event in state.history.get_events(): + if isinstance(event, MessageAction): + instruction = event.content + break + + return_val = await complete_runtime(runtime) + logger.info(f'Return value from complete_runtime: {return_val}') + reward = max(return_val['rewards']) # history is now available as a stream of events, rather than list of pairs of (Action, Observation) # for compatibility with the existing output format, we can remake the pairs here @@ -119,39 +178,38 @@ def process_instance( histories = state.history.compatibility_for_eval_history_pairs() # Save the output - output = { - 'instance_id': env_id, - 'instruction': instruction, - 'metadata': metadata.model_dump(), - 'history': histories, - 'metrics': metrics, - 'error': state.last_error if state and state.last_error else None, - 'test_result': reward, - } - + output = EvalOutput( + instance_id=env_id, + instruction=instruction, + metadata=metadata, + history=histories, + metrics=metrics, + error=state.last_error if state and state.last_error else None, + test_result={ + 'reward': reward, + }, + ) return output if __name__ == '__main__': args = parse_arguments() - env_ids = [ - id for id in gym.envs.registry.keys() if id.startswith('browsergym/webarena') - ] - dataset = pd.DataFrame( { - 'id': [ + 'instance_id': [ id for id in gym.envs.registry.keys() - if id.startswith('browsergym/miniwob') + if id.startswith('browsergym/webarena') ] } ) - id_column = 'id' - llm_config = get_llm_config_arg(args.llm_config) if args.llm_config else config.llm - logger.info(f'Config for evaluation: {config}') + llm_config = None + if args.llm_config: + llm_config = get_llm_config_arg(args.llm_config) + if llm_config is None: + raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}') metadata = 
make_metadata( llm_config, @@ -162,13 +220,14 @@ def process_instance( args.eval_output_dir, ) output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl') - instances = prepare_dataset(dataset, output_file, args.eval_n_limit, id_column) - _ = get_sandbox() # Initialize the sandbox - run_evaluation( - instances, - metadata, - output_file, - args.eval_num_workers, - process_instance, - id_column, + instances = prepare_dataset(dataset, output_file, args.eval_n_limit) + + asyncio.run( + run_evaluation( + instances, + metadata, + output_file, + args.eval_num_workers, + process_instance, + ) ) diff --git a/frontend/README.md b/frontend/README.md index 01530e63e222..4e6b58c79f4e 100644 --- a/frontend/README.md +++ b/frontend/README.md @@ -1,5 +1,11 @@ # Getting Started with the OpenDevin Frontend +The frontend code can be run against the docker image defined in the [Main README](../README.md) as a backend + +## Prerequisites + +A recent version of NodeJS / NPM (`brew install node`) + ## Available Scripts In the project directory, you can run: diff --git a/frontend/package-lock.json b/frontend/package-lock.json index fe1c707dc141..8e9382e4d607 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -11,7 +11,7 @@ "@monaco-editor/react": "^4.6.0", "@nextui-org/react": "^2.4.6", "@react-types/shared": "^3.24.1", - "@reduxjs/toolkit": "^2.2.6", + "@reduxjs/toolkit": "^2.2.7", "@vitejs/plugin-react": "^4.3.1", "@xterm/addon-fit": "^0.10.0", "@xterm/xterm": "^5.4.0", @@ -26,29 +26,29 @@ "react-dom": "^18.3.1", "react-highlight": "^0.15.0", "react-hot-toast": "^2.4.1", - "react-i18next": "^15.0.0", + "react-i18next": "^15.0.1", "react-icons": "^5.2.1", "react-markdown": "^9.0.1", "react-redux": "^9.1.2", "react-syntax-highlighter": "^15.5.0", "tailwind-merge": "^2.4.0", - "vite": "^5.3.5", + "vite": "^5.4.0", "web-vitals": "^3.5.2" }, "devDependencies": { - "@tailwindcss/typography": "^0.5.13", + "@tailwindcss/typography": "^0.5.14", 
"@testing-library/jest-dom": "^6.4.8", "@testing-library/react": "^16.0.0", "@testing-library/user-event": "^14.5.2", - "@types/node": "^20.14.12", + "@types/node": "^22.1.0", "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/react-highlight": "^0.12.8", "@types/react-syntax-highlighter": "^15.5.13", - "@typescript-eslint/eslint-plugin": "^7.17.0", - "@typescript-eslint/parser": "^7.17.0", + "@typescript-eslint/eslint-plugin": "^7.18.0", + "@typescript-eslint/parser": "^7.18.0", "@vitest/coverage-v8": "^1.6.0", - "autoprefixer": "^10.4.19", + "autoprefixer": "^10.4.20", "eslint": "^8.57.0", "eslint-config-airbnb": "^19.0.4", "eslint-config-airbnb-typescript": "^18.0.0", @@ -58,29 +58,20 @@ "eslint-plugin-prettier": "^5.2.1", "eslint-plugin-react": "^7.35.0", "eslint-plugin-react-hooks": "^4.6.2", - "husky": "^9.1.2", + "husky": "^9.1.4", "jsdom": "^24.1.1", - "lint-staged": "^15.2.7", - "postcss": "^8.4.40", + "lint-staged": "^15.2.8", + "postcss": "^8.4.41", "prettier": "^3.3.3", - "tailwindcss": "^3.4.7", + "tailwindcss": "^3.4.9", "typescript": "^5.5.4", - "vite-tsconfig-paths": "^4.3.2", + "vite-tsconfig-paths": "^5.0.1", "vitest": "^1.6.0" }, "engines": { "node": ">=14.8.0" } }, - "node_modules/@aashutoshrathi/word-wrap": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz", - "integrity": "sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/@adobe/css-tools": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.0.tgz", @@ -111,11 +102,11 @@ } }, "node_modules/@babel/code-frame": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.6.tgz", - "integrity": "sha512-ZJhac6FkEd1yhG2AHOmfcXG4ceoLltoCVJjN5XsWN9BifBQr+cHJbWi0h68HZuSORq+3WtJ2z0hwF2NG1b5kcA==", + "version": "7.24.7", + 
"resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz", + "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==", "dependencies": { - "@babel/highlight": "^7.24.6", + "@babel/highlight": "^7.24.7", "picocolors": "^1.0.0" }, "engines": { @@ -123,28 +114,28 @@ } }, "node_modules/@babel/compat-data": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.24.6.tgz", - "integrity": "sha512-aC2DGhBq5eEdyXWqrDInSqQjO0k8xtPRf5YylULqx8MCd6jBtzqfta/3ETMRpuKIc5hyswfO80ObyA1MvkCcUQ==", + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.25.2.tgz", + "integrity": "sha512-bYcppcpKBvX4znYaPEeFau03bp89ShqNMLs+rmdptMw+heSZh9+z84d2YG+K7cYLbWwzdjtDoW/uqZmPjulClQ==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/core": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.24.6.tgz", - "integrity": "sha512-qAHSfAdVyFmIvl0VHELib8xar7ONuSHrE2hLnsaWkYNTI68dmi1x8GYDhJjMI/e7XWal9QBlZkwbOnkcw7Z8gQ==", + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.25.2.tgz", + "integrity": "sha512-BBt3opiCOxUr9euZ5/ro/Xv8/V7yJ5bjYMqG/C1YAo8MIKAnumZalCN+msbci3Pigy4lIQfPUpfMM27HMGaYEA==", "dependencies": { "@ampproject/remapping": "^2.2.0", - "@babel/code-frame": "^7.24.6", - "@babel/generator": "^7.24.6", - "@babel/helper-compilation-targets": "^7.24.6", - "@babel/helper-module-transforms": "^7.24.6", - "@babel/helpers": "^7.24.6", - "@babel/parser": "^7.24.6", - "@babel/template": "^7.24.6", - "@babel/traverse": "^7.24.6", - "@babel/types": "^7.24.6", + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.25.0", + "@babel/helper-compilation-targets": "^7.25.2", + "@babel/helper-module-transforms": "^7.25.2", + "@babel/helpers": "^7.25.0", + "@babel/parser": "^7.25.0", + "@babel/template": "^7.25.0", + "@babel/traverse": "^7.25.2", + 
"@babel/types": "^7.25.2", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", @@ -168,11 +159,11 @@ } }, "node_modules/@babel/generator": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.24.6.tgz", - "integrity": "sha512-S7m4eNa6YAPJRHmKsLHIDJhNAGNKoWNiWefz1MBbpnt8g9lvMDl1hir4P9bo/57bQEmuwEhnRU/AMWsD0G/Fbg==", + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.25.0.tgz", + "integrity": "sha512-3LEEcj3PVW8pW2R1SR1M89g/qrYk/m/mB/tLqn7dn4sbBUQyTqnlod+II2U4dqiGtUmkcnAmkMDralTFZttRiw==", "dependencies": { - "@babel/types": "^7.24.6", + "@babel/types": "^7.25.0", "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25", "jsesc": "^2.5.1" @@ -182,13 +173,13 @@ } }, "node_modules/@babel/helper-compilation-targets": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.24.6.tgz", - "integrity": "sha512-VZQ57UsDGlX/5fFA7GkVPplZhHsVc+vuErWgdOiysI9Ksnw0Pbbd6pnPiR/mmJyKHgyIW0c7KT32gmhiF+cirg==", + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.25.2.tgz", + "integrity": "sha512-U2U5LsSaZ7TAt3cfaymQ8WHh0pxvdHoEk6HVpaexxixjyEquMh0L0YNJNM6CTGKMXV1iksi0iZkGw4AcFkPaaw==", "dependencies": { - "@babel/compat-data": "^7.24.6", - "@babel/helper-validator-option": "^7.24.6", - "browserslist": "^4.22.2", + "@babel/compat-data": "^7.25.2", + "@babel/helper-validator-option": "^7.24.8", + "browserslist": "^4.23.1", "lru-cache": "^5.1.1", "semver": "^6.3.1" }, @@ -204,58 +195,27 @@ "semver": "bin/semver.js" } }, - "node_modules/@babel/helper-environment-visitor": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.24.6.tgz", - "integrity": 
"sha512-Y50Cg3k0LKLMjxdPjIl40SdJgMB85iXn27Vk/qbHZCFx/o5XO3PSnpi675h1KEmmDb6OFArfd5SCQEQ5Q4H88g==", - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-function-name": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.24.6.tgz", - "integrity": "sha512-xpeLqeeRkbxhnYimfr2PC+iA0Q7ljX/d1eZ9/inYbmfG2jpl8Lu3DyXvpOAnrS5kxkfOWJjioIMQsaMBXFI05w==", - "dependencies": { - "@babel/template": "^7.24.6", - "@babel/types": "^7.24.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-hoist-variables": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.6.tgz", - "integrity": "sha512-SF/EMrC3OD7dSta1bLJIlrsVxwtd0UpjRJqLno6125epQMJ/kyFmpTT4pbvPbdQHzCHg+biQ7Syo8lnDtbR+uA==", - "dependencies": { - "@babel/types": "^7.24.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, "node_modules/@babel/helper-module-imports": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.6.tgz", - "integrity": "sha512-a26dmxFJBF62rRO9mmpgrfTLsAuyHk4e1hKTUkD/fcMfynt8gvEKwQPQDVxWhca8dHoDck+55DFt42zV0QMw5g==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.24.7.tgz", + "integrity": "sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==", "dependencies": { - "@babel/types": "^7.24.6" + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-module-transforms": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.24.6.tgz", - "integrity": "sha512-Y/YMPm83mV2HJTbX1Qh2sjgjqcacvOlhbzdCCsSlblOKjSYmQqEbO6rUniWQyRo9ncyfjT8hnUjlG06RXDEmcA==", + "version": "7.25.2", + "resolved": 
"https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.25.2.tgz", + "integrity": "sha512-BjyRAbix6j/wv83ftcVJmBt72QtHI56C7JXZoG2xATiLpmoC7dpd8WnkikExHDVPpi/3qCmO6WY1EaXOluiecQ==", "dependencies": { - "@babel/helper-environment-visitor": "^7.24.6", - "@babel/helper-module-imports": "^7.24.6", - "@babel/helper-simple-access": "^7.24.6", - "@babel/helper-split-export-declaration": "^7.24.6", - "@babel/helper-validator-identifier": "^7.24.6" + "@babel/helper-module-imports": "^7.24.7", + "@babel/helper-simple-access": "^7.24.7", + "@babel/helper-validator-identifier": "^7.24.7", + "@babel/traverse": "^7.25.2" }, "engines": { "node": ">=6.9.0" @@ -265,77 +225,67 @@ } }, "node_modules/@babel/helper-plugin-utils": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.6.tgz", - "integrity": "sha512-MZG/JcWfxybKwsA9N9PmtF2lOSFSEMVCpIRrbxccZFLJPrJciJdG/UhSh5W96GEteJI2ARqm5UAHxISwRDLSNg==", + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.8.tgz", + "integrity": "sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-simple-access": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.6.tgz", - "integrity": "sha512-nZzcMMD4ZhmB35MOOzQuiGO5RzL6tJbsT37Zx8M5L/i9KSrukGXWTjLe1knIbb/RmxoJE9GON9soq0c0VEMM5g==", - "dependencies": { - "@babel/types": "^7.24.6" - }, - "engines": { - "node": ">=6.9.0" - } - }, - "node_modules/@babel/helper-split-export-declaration": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.24.6.tgz", - "integrity": "sha512-CvLSkwXGWnYlF9+J3iZUvwgAxKiYzK3BWuo+mLzD/MDGOZDj7Gq8+hqaOkMxmJwmlv0iu86uH5fdADd9Hxkymw==", + "version": 
"7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.24.7.tgz", + "integrity": "sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==", "dependencies": { - "@babel/types": "^7.24.6" + "@babel/traverse": "^7.24.7", + "@babel/types": "^7.24.7" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-string-parser": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.6.tgz", - "integrity": "sha512-WdJjwMEkmBicq5T9fm/cHND3+UlFa2Yj8ALLgmoSQAJZysYbBjw+azChSGPN4DSPLXOcooGRvDwZWMcF/mLO2Q==", + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.8.tgz", + "integrity": "sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-identifier": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.6.tgz", - "integrity": "sha512-4yA7s865JHaqUdRbnaxarZREuPTHrjpDT+pXoAZ1yhyo6uFnIEpS8VMu16siFOHDpZNKYv5BObhsB//ycbICyw==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz", + "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helper-validator-option": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.6.tgz", - "integrity": "sha512-Jktc8KkF3zIkePb48QO+IapbXlSapOW9S+ogZZkcO6bABgYAxtZcjZ/O005111YLf+j4M84uEgwYoidDkXbCkQ==", + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.24.8.tgz", + "integrity": 
"sha512-xb8t9tD1MHLungh/AIoWYN+gVHaB9kwlu8gffXGSt3FFEIT7RjS+xWbc2vUD1UTZdIpKj/ab3rdqJ7ufngyi2Q==", "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/helpers": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.24.6.tgz", - "integrity": "sha512-V2PI+NqnyFu1i0GyTd/O/cTpxzQCYioSkUIRmgo7gFEHKKCg5w46+r/A6WeUR1+P3TeQ49dspGPNd/E3n9AnnA==", + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.25.0.tgz", + "integrity": "sha512-MjgLZ42aCm0oGjJj8CtSM3DB8NOOf8h2l7DCTePJs29u+v7yO/RBX9nShlKMgFnRks/Q4tBAe7Hxnov9VkGwLw==", "dependencies": { - "@babel/template": "^7.24.6", - "@babel/types": "^7.24.6" + "@babel/template": "^7.25.0", + "@babel/types": "^7.25.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/highlight": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.6.tgz", - "integrity": "sha512-2YnuOp4HAk2BsBrJJvYCbItHx0zWscI1C3zgWkz+wDyD9I7GIVrfnLyrR4Y1VR+7p+chAEcrgRQYZAGIKMV7vQ==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz", + "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==", "dependencies": { - "@babel/helper-validator-identifier": "^7.24.6", + "@babel/helper-validator-identifier": "^7.24.7", "chalk": "^2.4.2", "js-tokens": "^4.0.0", "picocolors": "^1.0.0" @@ -409,9 +359,12 @@ } }, "node_modules/@babel/parser": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.6.tgz", - "integrity": "sha512-eNZXdfU35nJC2h24RznROuOpO94h6x8sg9ju0tT9biNtLZ2vuP8SduLqqV+/8+cebSLV9SJEAN5Z3zQbJG/M+Q==", + "version": "7.25.3", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.25.3.tgz", + "integrity": "sha512-iLTJKDbJ4hMvFPgQwwsVoxtHyWpKKPBrxkANrSYewDPaPpT5py5yeVkgPIJ7XYXhndxJpaA3PyALSXQ7u8e/Dw==", + "dependencies": { + "@babel/types": "^7.25.2" + }, "bin": { 
"parser": "bin/babel-parser.js" }, @@ -420,11 +373,11 @@ } }, "node_modules/@babel/plugin-transform-react-jsx-self": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.24.6.tgz", - "integrity": "sha512-FfZfHXtQ5jYPQsCRyLpOv2GeLIIJhs8aydpNh39vRDjhD411XcfWDni5i7OjP/Rs8GAtTn7sWFFELJSHqkIxYg==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.24.7.tgz", + "integrity": "sha512-fOPQYbGSgH0HUp4UJO4sMBFjY6DuWq+2i8rixyUMb3CdGixs/gccURvYOAhajBdKDoGajFr3mUq5rH3phtkGzw==", "dependencies": { - "@babel/helper-plugin-utils": "^7.24.6" + "@babel/helper-plugin-utils": "^7.24.7" }, "engines": { "node": ">=6.9.0" @@ -434,11 +387,11 @@ } }, "node_modules/@babel/plugin-transform-react-jsx-source": { - "version": "7.24.1", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.24.1.tgz", - "integrity": "sha512-1v202n7aUq4uXAieRTKcwPzNyphlCuqHHDcdSNc+vdhoTEZcFMh+L5yZuCmGaIO7bs1nJUNfHB89TZyoL48xNA==", + "version": "7.24.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.24.7.tgz", + "integrity": "sha512-J2z+MWzZHVOemyLweMqngXrgGC42jQ//R0KdxqkIz/OrbVIIlhFI3WigZ5fO+nwFvBlncr4MGapd8vTyc7RPNQ==", "dependencies": { - "@babel/helper-plugin-utils": "^7.24.0" + "@babel/helper-plugin-utils": "^7.24.7" }, "engines": { "node": ">=6.9.0" @@ -448,9 +401,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.24.8", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.24.8.tgz", - "integrity": "sha512-5F7SDGs1T72ZczbRwbGO9lQi0NLjQxzl6i4lJxLxfW9U5UluCSyEJeniWvnhl3/euNiqQVbo8zruhsDfid0esA==", + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.25.0.tgz", + "integrity": 
"sha512-7dRy4DwXwtzBrPbZflqxnvfxLF8kdZXPkhymtDeFoFqE6ldzjQFgYTtYIFARcLEYDrqfBfYcZt1WqFxRoyC9Rw==", "dependencies": { "regenerator-runtime": "^0.14.0" }, @@ -459,31 +412,28 @@ } }, "node_modules/@babel/template": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.24.6.tgz", - "integrity": "sha512-3vgazJlLwNXi9jhrR1ef8qiB65L1RK90+lEQwv4OxveHnqC3BfmnHdgySwRLzf6akhlOYenT+b7AfWq+a//AHw==", + "version": "7.25.0", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.0.tgz", + "integrity": "sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==", "dependencies": { - "@babel/code-frame": "^7.24.6", - "@babel/parser": "^7.24.6", - "@babel/types": "^7.24.6" + "@babel/code-frame": "^7.24.7", + "@babel/parser": "^7.25.0", + "@babel/types": "^7.25.0" }, "engines": { "node": ">=6.9.0" } }, "node_modules/@babel/traverse": { - "version": "7.24.6", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.24.6.tgz", - "integrity": "sha512-OsNjaJwT9Zn8ozxcfoBc+RaHdj3gFmCmYoQLUII1o6ZrUwku0BMg80FoOTPx+Gi6XhcQxAYE4xyjPTo4SxEQqw==", - "dependencies": { - "@babel/code-frame": "^7.24.6", - "@babel/generator": "^7.24.6", - "@babel/helper-environment-visitor": "^7.24.6", - "@babel/helper-function-name": "^7.24.6", - "@babel/helper-hoist-variables": "^7.24.6", - "@babel/helper-split-export-declaration": "^7.24.6", - "@babel/parser": "^7.24.6", - "@babel/types": "^7.24.6", + "version": "7.25.3", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.3.tgz", + "integrity": "sha512-HefgyP1x754oGCsKmV5reSmtV7IXj/kpaE1XYY+D9G5PvKKoFfSbiS4M77MdjuwlZKDIKFCffq9rPU+H/s3ZdQ==", + "dependencies": { + "@babel/code-frame": "^7.24.7", + "@babel/generator": "^7.25.0", + "@babel/parser": "^7.25.3", + "@babel/template": "^7.25.0", + "@babel/types": "^7.25.2", "debug": "^4.3.1", "globals": "^11.1.0" }, @@ -492,12 +442,12 @@ } }, "node_modules/@babel/types": { - "version": 
"7.24.6", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.24.6.tgz", - "integrity": "sha512-WaMsgi6Q8zMgMth93GvWPXkhAIEobfsIkLTacoVZoK1J0CevIPGYY2Vo5YvJGqyHqXM6P4ppOYGsIRU8MM9pFQ==", + "version": "7.25.2", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.25.2.tgz", + "integrity": "sha512-YTnYtra7W9e6/oAZEHj0bJehPRUlLH9/fbpT5LfB0NhQXyALCRkRs3zH9v07IYhkgpqX6Z78FnuccZr/l4Fs4Q==", "dependencies": { - "@babel/helper-string-parser": "^7.24.6", - "@babel/helper-validator-identifier": "^7.24.6", + "@babel/helper-string-parser": "^7.24.8", + "@babel/helper-validator-identifier": "^7.24.7", "to-fast-properties": "^2.0.0" }, "engines": { @@ -871,9 +821,9 @@ } }, "node_modules/@eslint-community/regexpp": { - "version": "4.10.0", - "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.10.0.tgz", - "integrity": "sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==", + "version": "4.11.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.11.0.tgz", + "integrity": "sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==", "dev": true, "engines": { "node": "^12.0.0 || ^14.0.0 || >=16.0.0" @@ -939,18 +889,6 @@ "node": "*" } }, - "node_modules/@eslint/eslintrc/node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/@eslint/js": { "version": "8.57.0", "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", @@ -1008,6 +946,7 @@ "version": "0.11.14", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", "integrity": 
"sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "deprecated": "Use @eslint/config-array instead", "dev": true, "dependencies": { "@humanwhocodes/object-schema": "^2.0.2", @@ -1057,12 +996,13 @@ "version": "2.0.3", "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", "dev": true }, "node_modules/@internationalized/date": { - "version": "3.5.4", - "resolved": "https://registry.npmjs.org/@internationalized/date/-/date-3.5.4.tgz", - "integrity": "sha512-qoVJVro+O0rBaw+8HPjUB1iH8Ihf8oziEnqMnvhJUSuVIrHOuZ6eNLHNvzXJKUvAtaDiqMnRlg8Z2mgh09BlUw==", + "version": "3.5.5", + "resolved": "https://registry.npmjs.org/@internationalized/date/-/date-3.5.5.tgz", + "integrity": "sha512-H+CfYvOZ0LTJeeLOqm19E3uj/4YjrmOFtBufDHPfvtI80hFAMqtrp7oCACpe4Cil5l8S0Qu/9dYfZc/5lY8WQQ==", "dependencies": { "@swc/helpers": "^0.5.0" } @@ -1227,9 +1167,9 @@ } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.4.15", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz", - "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==" + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==" }, "node_modules/@jridgewell/trace-mapping": { "version": "0.3.25", @@ -3048,28 +2988,166 @@ } }, "node_modules/@react-aria/grid": { - "version": "3.9.1", - "resolved": "https://registry.npmjs.org/@react-aria/grid/-/grid-3.9.1.tgz", - "integrity": "sha512-fGEZqAEaS8mqzV/II3N4ndoNWegIcbh+L3PmKbXdpKKUP8VgMs/WY5rYl5WAF0f5RoFwXqx3ibDLeR9tKj/bOg==", + "version": "3.10.1", + 
"resolved": "https://registry.npmjs.org/@react-aria/grid/-/grid-3.10.1.tgz", + "integrity": "sha512-7dSgiYVQapBtPV4SIit+9fJ1qoEjtp+PXffJkWAPtGbg/jJ4b0jcVzykH7ARD4w/6jAJN/oVSfrKZqFPoLAd9w==", "dependencies": { - "@react-aria/focus": "^3.17.1", - "@react-aria/i18n": "^3.11.1", - "@react-aria/interactions": "^3.21.3", + "@react-aria/focus": "^3.18.1", + "@react-aria/i18n": "^3.12.1", + "@react-aria/interactions": "^3.22.1", "@react-aria/live-announcer": "^3.3.4", - "@react-aria/selection": "^3.18.1", - "@react-aria/utils": "^3.24.1", - "@react-stately/collections": "^3.10.7", - "@react-stately/grid": "^3.8.7", - "@react-stately/selection": "^3.15.1", - "@react-stately/virtualizer": "^3.7.1", - "@react-types/checkbox": "^3.8.1", - "@react-types/grid": "^3.2.6", - "@react-types/shared": "^3.23.1", + "@react-aria/selection": "^3.19.1", + "@react-aria/utils": "^3.25.1", + "@react-stately/collections": "^3.10.9", + "@react-stately/grid": "^3.9.1", + "@react-stately/selection": "^3.16.1", + "@react-types/checkbox": "^3.8.3", + "@react-types/grid": "^3.2.8", + "@react-types/shared": "^3.24.1", "@swc/helpers": "^0.5.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0", - "react-dom": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/grid/node_modules/@react-aria/focus": { + "version": "3.18.1", + "resolved": "https://registry.npmjs.org/@react-aria/focus/-/focus-3.18.1.tgz", + "integrity": "sha512-N0Cy61WCIv+57mbqC7hiZAsB+3rF5n4JKabxUmg/2RTJL6lq7hJ5N4gx75ymKxkN8GnVDwt4pKZah48Wopa5jw==", + "dependencies": { + "@react-aria/interactions": "^3.22.1", + "@react-aria/utils": "^3.25.1", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + 
"node_modules/@react-aria/grid/node_modules/@react-aria/i18n": { + "version": "3.12.1", + "resolved": "https://registry.npmjs.org/@react-aria/i18n/-/i18n-3.12.1.tgz", + "integrity": "sha512-0q3gyogF9Ekah+9LOo6tcfshxsk2Ope+KdbtFHJVhznedMxn6RpHGcVur5ImbQ1dYafA5CmjBUGJW70b56+BGA==", + "dependencies": { + "@internationalized/date": "^3.5.5", + "@internationalized/message": "^3.1.4", + "@internationalized/number": "^3.5.3", + "@internationalized/string": "^3.2.3", + "@react-aria/ssr": "^3.9.5", + "@react-aria/utils": "^3.25.1", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/grid/node_modules/@react-aria/interactions": { + "version": "3.22.1", + "resolved": "https://registry.npmjs.org/@react-aria/interactions/-/interactions-3.22.1.tgz", + "integrity": "sha512-5TLzQaDAQQ5C70yG8GInbO4wIylKY67RfTIIwQPGR/4n5OIjbUD8BOj3NuSsuZ/frUPaBXo1VEBBmSO23fxkjw==", + "dependencies": { + "@react-aria/ssr": "^3.9.5", + "@react-aria/utils": "^3.25.1", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/grid/node_modules/@react-aria/selection": { + "version": "3.19.1", + "resolved": "https://registry.npmjs.org/@react-aria/selection/-/selection-3.19.1.tgz", + "integrity": "sha512-mbExvq2Omi60sTWFGjwcNz1ja2P8VDsxWAqSypHRTyqXhtgqbv8V/v8Gp+7BmVPH1YHcbhztl6rvUZTDOSszzw==", + "dependencies": { + "@react-aria/focus": "^3.18.1", + "@react-aria/i18n": "^3.12.1", + "@react-aria/interactions": "^3.22.1", + "@react-aria/utils": "^3.25.1", + "@react-stately/selection": "^3.16.1", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0", + "react-dom": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + 
"node_modules/@react-aria/grid/node_modules/@react-aria/ssr": { + "version": "3.9.5", + "resolved": "https://registry.npmjs.org/@react-aria/ssr/-/ssr-3.9.5.tgz", + "integrity": "sha512-xEwGKoysu+oXulibNUSkXf8itW0npHHTa6c4AyYeZIJyRoegeteYuFpZUBPtIDE8RfHdNsSmE1ssOkxRnwbkuQ==", + "dependencies": { + "@swc/helpers": "^0.5.0" + }, + "engines": { + "node": ">= 12" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/grid/node_modules/@react-aria/utils": { + "version": "3.25.1", + "resolved": "https://registry.npmjs.org/@react-aria/utils/-/utils-3.25.1.tgz", + "integrity": "sha512-5Uj864e7T5+yj78ZfLnfHqmypLiqW2mN+nsdslog2z5ssunTqjolVeM15ootXskjISlZ7MojLpq97kIC4nlnAw==", + "dependencies": { + "@react-aria/ssr": "^3.9.5", + "@react-stately/utils": "^3.10.2", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/grid/node_modules/@react-stately/collections": { + "version": "3.10.9", + "resolved": "https://registry.npmjs.org/@react-stately/collections/-/collections-3.10.9.tgz", + "integrity": "sha512-plyrng6hOQMG8LrjArMA6ts/DgWyXln3g90/hFNbqe/hdVYF53sDVsj8Jb+5LtoYTpiAlV6eOvy1XR0vPZUf8w==", + "dependencies": { + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/grid/node_modules/@react-stately/utils": { + "version": "3.10.2", + "resolved": "https://registry.npmjs.org/@react-stately/utils/-/utils-3.10.2.tgz", + "integrity": "sha512-fh6OTQtbeQC0ywp6LJuuKs6tKIgFvt/DlIZEcIpGho6/oZG229UnIk6TUekwxnDbumuYyan6D9EgUtEMmT8UIg==", + "dependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + 
"node_modules/@react-aria/grid/node_modules/@react-types/checkbox": { + "version": "3.8.3", + "resolved": "https://registry.npmjs.org/@react-types/checkbox/-/checkbox-3.8.3.tgz", + "integrity": "sha512-f4c1mnLEt0iS1NMkyZXgT3q3AgcxzDk7w6MSONOKydcnh0xG5L2oefY14DhVDLkAuQS7jThlUFwiAs+MxiO3MA==", + "dependencies": { + "@react-types/shared": "^3.24.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/grid/node_modules/@react-types/grid": { + "version": "3.2.8", + "resolved": "https://registry.npmjs.org/@react-types/grid/-/grid-3.2.8.tgz", + "integrity": "sha512-6PJrpukwMqlv3IhJSDkJuVbhHM8Oe6hd2supWqd9adMXrlSP7QHt9a8SgFcFblCCTx8JzUaA0PvY5sTudcEtOQ==", + "dependencies": { + "@react-types/shared": "^3.24.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-aria/i18n": { @@ -3281,20 +3359,89 @@ } }, "node_modules/@react-aria/spinbutton": { - "version": "3.6.5", - "resolved": "https://registry.npmjs.org/@react-aria/spinbutton/-/spinbutton-3.6.5.tgz", - "integrity": "sha512-0aACBarF/Xr/7ixzjVBTQ0NBwwwsoGkf5v6AVFVMTC0uYMXHTALvRs+ULHjHMa5e/cX/aPlEvaVT7jfSs+Xy9Q==", + "version": "3.6.7", + "resolved": "https://registry.npmjs.org/@react-aria/spinbutton/-/spinbutton-3.6.7.tgz", + "integrity": "sha512-OCimp4yXoFIgh6WAMOls5DDDRDRO75ZFic3YA6wLWTRNHxo1Lj8S90i1A6pakY6bi4hdBCKmj4DnFSNKAw1iWg==", "dependencies": { - "@react-aria/i18n": "^3.11.1", + "@react-aria/i18n": "^3.12.1", "@react-aria/live-announcer": "^3.3.4", - "@react-aria/utils": "^3.24.1", - "@react-types/button": "^3.9.4", - "@react-types/shared": "^3.23.1", + "@react-aria/utils": "^3.25.1", + "@react-types/button": "^3.9.6", + "@react-types/shared": "^3.24.1", "@swc/helpers": "^0.5.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0", - "react-dom": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0", + "react-dom": 
"^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/spinbutton/node_modules/@react-aria/i18n": { + "version": "3.12.1", + "resolved": "https://registry.npmjs.org/@react-aria/i18n/-/i18n-3.12.1.tgz", + "integrity": "sha512-0q3gyogF9Ekah+9LOo6tcfshxsk2Ope+KdbtFHJVhznedMxn6RpHGcVur5ImbQ1dYafA5CmjBUGJW70b56+BGA==", + "dependencies": { + "@internationalized/date": "^3.5.5", + "@internationalized/message": "^3.1.4", + "@internationalized/number": "^3.5.3", + "@internationalized/string": "^3.2.3", + "@react-aria/ssr": "^3.9.5", + "@react-aria/utils": "^3.25.1", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/spinbutton/node_modules/@react-aria/ssr": { + "version": "3.9.5", + "resolved": "https://registry.npmjs.org/@react-aria/ssr/-/ssr-3.9.5.tgz", + "integrity": "sha512-xEwGKoysu+oXulibNUSkXf8itW0npHHTa6c4AyYeZIJyRoegeteYuFpZUBPtIDE8RfHdNsSmE1ssOkxRnwbkuQ==", + "dependencies": { + "@swc/helpers": "^0.5.0" + }, + "engines": { + "node": ">= 12" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/spinbutton/node_modules/@react-aria/utils": { + "version": "3.25.1", + "resolved": "https://registry.npmjs.org/@react-aria/utils/-/utils-3.25.1.tgz", + "integrity": "sha512-5Uj864e7T5+yj78ZfLnfHqmypLiqW2mN+nsdslog2z5ssunTqjolVeM15ootXskjISlZ7MojLpq97kIC4nlnAw==", + "dependencies": { + "@react-aria/ssr": "^3.9.5", + "@react-stately/utils": "^3.10.2", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/spinbutton/node_modules/@react-stately/utils": { + "version": "3.10.2", + "resolved": "https://registry.npmjs.org/@react-stately/utils/-/utils-3.10.2.tgz", + "integrity": 
"sha512-fh6OTQtbeQC0ywp6LJuuKs6tKIgFvt/DlIZEcIpGho6/oZG229UnIk6TUekwxnDbumuYyan6D9EgUtEMmT8UIg==", + "dependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/spinbutton/node_modules/@react-types/button": { + "version": "3.9.6", + "resolved": "https://registry.npmjs.org/@react-types/button/-/button-3.9.6.tgz", + "integrity": "sha512-8lA+D5JLbNyQikf8M/cPP2cji91aVTcqjrGpDqI7sQnaLFikM8eFR6l1ZWGtZS5MCcbfooko77ha35SYplSQvw==", + "dependencies": { + "@react-types/shared": "^3.24.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-aria/ssr": { @@ -3391,69 +3538,163 @@ } }, "node_modules/@react-aria/toggle": { - "version": "3.10.4", - "resolved": "https://registry.npmjs.org/@react-aria/toggle/-/toggle-3.10.4.tgz", - "integrity": "sha512-bRk+CdB8QzrSyGNjENXiTWxfzYKRw753iwQXsEAU7agPCUdB8cZJyrhbaUoD0rwczzTp2zDbZ9rRbUPdsBE2YQ==", - "dependencies": { - "@react-aria/focus": "^3.17.1", - "@react-aria/interactions": "^3.21.3", - "@react-aria/utils": "^3.24.1", - "@react-stately/toggle": "^3.7.4", - "@react-types/checkbox": "^3.8.1", - "@swc/helpers": "^0.5.0" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" - } - }, - "node_modules/@react-aria/tooltip": { - "version": "3.7.4", - "resolved": "https://registry.npmjs.org/@react-aria/tooltip/-/tooltip-3.7.4.tgz", - "integrity": "sha512-+XRx4HlLYqWY3fB8Z60bQi/rbWDIGlFUtXYbtoa1J+EyRWfhpvsYImP8qeeNO/vgjUtDy1j9oKa8p6App9mBMQ==", - "dependencies": { - "@react-aria/focus": "^3.17.1", - "@react-aria/interactions": "^3.21.3", - "@react-aria/utils": "^3.24.1", - "@react-stately/tooltip": "^3.4.9", - "@react-types/shared": "^3.23.1", - "@react-types/tooltip": "^3.4.9", + "version": "3.10.6", + "resolved": "https://registry.npmjs.org/@react-aria/toggle/-/toggle-3.10.6.tgz", + "integrity": 
"sha512-AGlbtB1b8grrtjbiW5Au0LKYzxR83RHbHhaUkFwajyYRGyuEzr3Y03OiveoPB+DayA8Gz3H1ZVmW++8JZQOWHw==", + "dependencies": { + "@react-aria/focus": "^3.18.1", + "@react-aria/interactions": "^3.22.1", + "@react-aria/utils": "^3.25.1", + "@react-stately/toggle": "^3.7.6", + "@react-types/checkbox": "^3.8.3", + "@react-types/shared": "^3.24.1", "@swc/helpers": "^0.5.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, - "node_modules/@react-aria/utils": { - "version": "3.24.1", - "resolved": "https://registry.npmjs.org/@react-aria/utils/-/utils-3.24.1.tgz", - "integrity": "sha512-O3s9qhPMd6n42x9sKeJ3lhu5V1Tlnzhu6Yk8QOvDuXf7UGuUjXf9mzfHJt1dYzID4l9Fwm8toczBzPM9t0jc8Q==", + "node_modules/@react-aria/toggle/node_modules/@react-aria/focus": { + "version": "3.18.1", + "resolved": "https://registry.npmjs.org/@react-aria/focus/-/focus-3.18.1.tgz", + "integrity": "sha512-N0Cy61WCIv+57mbqC7hiZAsB+3rF5n4JKabxUmg/2RTJL6lq7hJ5N4gx75ymKxkN8GnVDwt4pKZah48Wopa5jw==", "dependencies": { - "@react-aria/ssr": "^3.9.4", - "@react-stately/utils": "^3.10.1", - "@react-types/shared": "^3.23.1", + "@react-aria/interactions": "^3.22.1", + "@react-aria/utils": "^3.25.1", + "@react-types/shared": "^3.24.1", "@swc/helpers": "^0.5.0", "clsx": "^2.0.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, - "node_modules/@react-aria/visually-hidden": { - "version": "3.8.12", - "resolved": "https://registry.npmjs.org/@react-aria/visually-hidden/-/visually-hidden-3.8.12.tgz", - "integrity": "sha512-Bawm+2Cmw3Xrlr7ARzl2RLtKh0lNUdJ0eNqzWcyx4c0VHUAWtThmH5l+HRqFUGzzutFZVo89SAy40BAbd0gjVw==", + "node_modules/@react-aria/toggle/node_modules/@react-aria/interactions": { + "version": "3.22.1", + "resolved": "https://registry.npmjs.org/@react-aria/interactions/-/interactions-3.22.1.tgz", + "integrity": 
"sha512-5TLzQaDAQQ5C70yG8GInbO4wIylKY67RfTIIwQPGR/4n5OIjbUD8BOj3NuSsuZ/frUPaBXo1VEBBmSO23fxkjw==", "dependencies": { - "@react-aria/interactions": "^3.21.3", - "@react-aria/utils": "^3.24.1", - "@react-types/shared": "^3.23.1", + "@react-aria/ssr": "^3.9.5", + "@react-aria/utils": "^3.25.1", + "@react-types/shared": "^3.24.1", "@swc/helpers": "^0.5.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, - "node_modules/@react-stately/calendar": { - "version": "3.5.1", + "node_modules/@react-aria/toggle/node_modules/@react-aria/ssr": { + "version": "3.9.5", + "resolved": "https://registry.npmjs.org/@react-aria/ssr/-/ssr-3.9.5.tgz", + "integrity": "sha512-xEwGKoysu+oXulibNUSkXf8itW0npHHTa6c4AyYeZIJyRoegeteYuFpZUBPtIDE8RfHdNsSmE1ssOkxRnwbkuQ==", + "dependencies": { + "@swc/helpers": "^0.5.0" + }, + "engines": { + "node": ">= 12" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/toggle/node_modules/@react-aria/utils": { + "version": "3.25.1", + "resolved": "https://registry.npmjs.org/@react-aria/utils/-/utils-3.25.1.tgz", + "integrity": "sha512-5Uj864e7T5+yj78ZfLnfHqmypLiqW2mN+nsdslog2z5ssunTqjolVeM15ootXskjISlZ7MojLpq97kIC4nlnAw==", + "dependencies": { + "@react-aria/ssr": "^3.9.5", + "@react-stately/utils": "^3.10.2", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0", + "clsx": "^2.0.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/toggle/node_modules/@react-stately/toggle": { + "version": "3.7.6", + "resolved": "https://registry.npmjs.org/@react-stately/toggle/-/toggle-3.7.6.tgz", + "integrity": "sha512-xRZyrjNVu1VCd1xpg5RwmNYs9fXb+JHChoUaRcBmGCCjsPD0R5uR3iNuE17RXJtWS3/8o9IJVn90+/7NW7boOg==", + "dependencies": { + "@react-stately/utils": "^3.10.2", + "@react-types/checkbox": "^3.8.3", + "@swc/helpers": "^0.5.0" + }, + 
"peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/toggle/node_modules/@react-stately/utils": { + "version": "3.10.2", + "resolved": "https://registry.npmjs.org/@react-stately/utils/-/utils-3.10.2.tgz", + "integrity": "sha512-fh6OTQtbeQC0ywp6LJuuKs6tKIgFvt/DlIZEcIpGho6/oZG229UnIk6TUekwxnDbumuYyan6D9EgUtEMmT8UIg==", + "dependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/toggle/node_modules/@react-types/checkbox": { + "version": "3.8.3", + "resolved": "https://registry.npmjs.org/@react-types/checkbox/-/checkbox-3.8.3.tgz", + "integrity": "sha512-f4c1mnLEt0iS1NMkyZXgT3q3AgcxzDk7w6MSONOKydcnh0xG5L2oefY14DhVDLkAuQS7jThlUFwiAs+MxiO3MA==", + "dependencies": { + "@react-types/shared": "^3.24.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-aria/tooltip": { + "version": "3.7.4", + "resolved": "https://registry.npmjs.org/@react-aria/tooltip/-/tooltip-3.7.4.tgz", + "integrity": "sha512-+XRx4HlLYqWY3fB8Z60bQi/rbWDIGlFUtXYbtoa1J+EyRWfhpvsYImP8qeeNO/vgjUtDy1j9oKa8p6App9mBMQ==", + "dependencies": { + "@react-aria/focus": "^3.17.1", + "@react-aria/interactions": "^3.21.3", + "@react-aria/utils": "^3.24.1", + "@react-stately/tooltip": "^3.4.9", + "@react-types/shared": "^3.23.1", + "@react-types/tooltip": "^3.4.9", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + } + }, + "node_modules/@react-aria/utils": { + "version": "3.24.1", + "resolved": "https://registry.npmjs.org/@react-aria/utils/-/utils-3.24.1.tgz", + "integrity": "sha512-O3s9qhPMd6n42x9sKeJ3lhu5V1Tlnzhu6Yk8QOvDuXf7UGuUjXf9mzfHJt1dYzID4l9Fwm8toczBzPM9t0jc8Q==", + "dependencies": { + "@react-aria/ssr": "^3.9.4", + "@react-stately/utils": "^3.10.1", + "@react-types/shared": "^3.23.1", + "@swc/helpers": "^0.5.0", + 
"clsx": "^2.0.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + } + }, + "node_modules/@react-aria/visually-hidden": { + "version": "3.8.12", + "resolved": "https://registry.npmjs.org/@react-aria/visually-hidden/-/visually-hidden-3.8.12.tgz", + "integrity": "sha512-Bawm+2Cmw3Xrlr7ARzl2RLtKh0lNUdJ0eNqzWcyx4c0VHUAWtThmH5l+HRqFUGzzutFZVo89SAy40BAbd0gjVw==", + "dependencies": { + "@react-aria/interactions": "^3.21.3", + "@react-aria/utils": "^3.24.1", + "@react-types/shared": "^3.23.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + } + }, + "node_modules/@react-stately/calendar": { + "version": "3.5.1", "resolved": "https://registry.npmjs.org/@react-stately/calendar/-/calendar-3.5.1.tgz", "integrity": "sha512-7l7QhqGUJ5AzWHfvZzbTe3J4t72Ht5BmhW4hlVI7flQXtfrmYkVtl3ZdytEZkkHmWGYZRW9b4IQTQGZxhtlElA==", "dependencies": { @@ -3552,18 +3793,41 @@ } }, "node_modules/@react-stately/grid": { - "version": "3.8.7", - "resolved": "https://registry.npmjs.org/@react-stately/grid/-/grid-3.8.7.tgz", - "integrity": "sha512-he3TXCLAhF5C5z1/G4ySzcwyt7PEiWcVIupxebJQqRyFrNWemSuv+7tolnStmG8maMVIyV3P/3j4eRBbdSlOIg==", + "version": "3.9.1", + "resolved": "https://registry.npmjs.org/@react-stately/grid/-/grid-3.9.1.tgz", + "integrity": "sha512-LSVIcXO/cqwG0IgDSk2juDbpARBS1IzGnsTp/8vSOejMxq5MXrwxL5hUcqNczL8Ss6aLpELm42tCS0kPm3cMKw==", "dependencies": { - "@react-stately/collections": "^3.10.7", - "@react-stately/selection": "^3.15.1", - "@react-types/grid": "^3.2.6", - "@react-types/shared": "^3.23.1", + "@react-stately/collections": "^3.10.9", + "@react-stately/selection": "^3.16.1", + "@react-types/grid": "^3.2.8", + "@react-types/shared": "^3.24.1", "@swc/helpers": "^0.5.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/grid/node_modules/@react-stately/collections": { + "version": 
"3.10.9", + "resolved": "https://registry.npmjs.org/@react-stately/collections/-/collections-3.10.9.tgz", + "integrity": "sha512-plyrng6hOQMG8LrjArMA6ts/DgWyXln3g90/hFNbqe/hdVYF53sDVsj8Jb+5LtoYTpiAlV6eOvy1XR0vPZUf8w==", + "dependencies": { + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/grid/node_modules/@react-types/grid": { + "version": "3.2.8", + "resolved": "https://registry.npmjs.org/@react-types/grid/-/grid-3.2.8.tgz", + "integrity": "sha512-6PJrpukwMqlv3IhJSDkJuVbhHM8Oe6hd2supWqd9adMXrlSP7QHt9a8SgFcFblCCTx8JzUaA0PvY5sTudcEtOQ==", + "dependencies": { + "@react-types/shared": "^3.24.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-stately/list": { @@ -3624,33 +3888,141 @@ } }, "node_modules/@react-stately/select": { - "version": "3.6.4", - "resolved": "https://registry.npmjs.org/@react-stately/select/-/select-3.6.4.tgz", - "integrity": "sha512-whZgF1N53D0/dS8tOFdrswB0alsk5Q5620HC3z+5f2Hpi8gwgAZ8TYa+2IcmMYRiT+bxVuvEc/NirU9yPmqGbA==", + "version": "3.6.6", + "resolved": "https://registry.npmjs.org/@react-stately/select/-/select-3.6.6.tgz", + "integrity": "sha512-JEpBosWNSXRexE/iReATei1EiVdTIwOWlLcCGw6K7oC/5/f+OHMsh2Kkt/c/RzM/to3vgR+Wbbqwrb712AWgYQ==", "dependencies": { - "@react-stately/form": "^3.0.3", - "@react-stately/list": "^3.10.5", - "@react-stately/overlays": "^3.6.7", - "@react-types/select": "^3.9.4", - "@react-types/shared": "^3.23.1", + "@react-stately/form": "^3.0.5", + "@react-stately/list": "^3.10.7", + "@react-stately/overlays": "^3.6.9", + "@react-types/select": "^3.9.6", + "@react-types/shared": "^3.24.1", "@swc/helpers": "^0.5.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + 
"node_modules/@react-stately/select/node_modules/@react-stately/collections": { + "version": "3.10.9", + "resolved": "https://registry.npmjs.org/@react-stately/collections/-/collections-3.10.9.tgz", + "integrity": "sha512-plyrng6hOQMG8LrjArMA6ts/DgWyXln3g90/hFNbqe/hdVYF53sDVsj8Jb+5LtoYTpiAlV6eOvy1XR0vPZUf8w==", + "dependencies": { + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/select/node_modules/@react-stately/form": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@react-stately/form/-/form-3.0.5.tgz", + "integrity": "sha512-J3plwJ63HQz109OdmaTqTA8Qhvl3gcYYK7DtgKyNP6mc/Me2Q4tl2avkWoA+22NRuv5m+J8TpBk4AVHUEOwqeQ==", + "dependencies": { + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/select/node_modules/@react-stately/list": { + "version": "3.10.7", + "resolved": "https://registry.npmjs.org/@react-stately/list/-/list-3.10.7.tgz", + "integrity": "sha512-W5PG7uG5GQV2Q59vXJE7QLKHZIoUNEx+JmHrBUCMKUgyngSpKIIEDR/R/C1b6ZJ9jMqqZA68Zlnd5iK1/mBi1A==", + "dependencies": { + "@react-stately/collections": "^3.10.9", + "@react-stately/selection": "^3.16.1", + "@react-stately/utils": "^3.10.2", + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/select/node_modules/@react-stately/overlays": { + "version": "3.6.9", + "resolved": "https://registry.npmjs.org/@react-stately/overlays/-/overlays-3.6.9.tgz", + "integrity": "sha512-4chfyzKw7P2UEainm0yzjUgYwG1ovBejN88eTrn+O62x5huuMCwe0cbMxmYh4y7IhRFSee3jIJd0SP0u/+i39w==", + "dependencies": { + "@react-stately/utils": "^3.10.2", + "@react-types/overlays": "^3.8.9", + "@swc/helpers": "^0.5.0" + }, + 
"peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/select/node_modules/@react-stately/utils": { + "version": "3.10.2", + "resolved": "https://registry.npmjs.org/@react-stately/utils/-/utils-3.10.2.tgz", + "integrity": "sha512-fh6OTQtbeQC0ywp6LJuuKs6tKIgFvt/DlIZEcIpGho6/oZG229UnIk6TUekwxnDbumuYyan6D9EgUtEMmT8UIg==", + "dependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/select/node_modules/@react-types/overlays": { + "version": "3.8.9", + "resolved": "https://registry.npmjs.org/@react-types/overlays/-/overlays-3.8.9.tgz", + "integrity": "sha512-9ni9upQgXPnR+K9cWmbYWvm3ll9gH8P/XsEZprqIV5zNLMF334jADK48h4jafb1X9RFnj0WbHo6BqcSObzjTig==", + "dependencies": { + "@react-types/shared": "^3.24.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/select/node_modules/@react-types/select": { + "version": "3.9.6", + "resolved": "https://registry.npmjs.org/@react-types/select/-/select-3.9.6.tgz", + "integrity": "sha512-cVSFR0eJLup/ht1Uto+y8uyLmHO89J6wNh65SIHb3jeVz9oLBAedP3YNI2qB+F9qFMUcA8PBSLXIIuT6gXzLgQ==", + "dependencies": { + "@react-types/shared": "^3.24.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-stately/selection": { - "version": "3.15.1", - "resolved": "https://registry.npmjs.org/@react-stately/selection/-/selection-3.15.1.tgz", - "integrity": "sha512-6TQnN9L0UY9w19B7xzb1P6mbUVBtW840Cw1SjgNXCB3NPaCf59SwqClYzoj8O2ZFzMe8F/nUJtfU1NS65/OLlw==", + "version": "3.16.1", + "resolved": "https://registry.npmjs.org/@react-stately/selection/-/selection-3.16.1.tgz", + "integrity": "sha512-qmnmYaXY7IhhzmIiInec1a/yPxlPSBHka6vrWddvt0S6zN7FU5cv6sm69ONUwYwLKSoaNHgOGvZhmsTzyV0O2A==", "dependencies": { - "@react-stately/collections": "^3.10.7", - 
"@react-stately/utils": "^3.10.1", - "@react-types/shared": "^3.23.1", + "@react-stately/collections": "^3.10.9", + "@react-stately/utils": "^3.10.2", + "@react-types/shared": "^3.24.1", "@swc/helpers": "^0.5.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/selection/node_modules/@react-stately/collections": { + "version": "3.10.9", + "resolved": "https://registry.npmjs.org/@react-stately/collections/-/collections-3.10.9.tgz", + "integrity": "sha512-plyrng6hOQMG8LrjArMA6ts/DgWyXln3g90/hFNbqe/hdVYF53sDVsj8Jb+5LtoYTpiAlV6eOvy1XR0vPZUf8w==", + "dependencies": { + "@react-types/shared": "^3.24.1", + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-stately/selection/node_modules/@react-stately/utils": { + "version": "3.10.2", + "resolved": "https://registry.npmjs.org/@react-stately/utils/-/utils-3.10.2.tgz", + "integrity": "sha512-fh6OTQtbeQC0ywp6LJuuKs6tKIgFvt/DlIZEcIpGho6/oZG229UnIk6TUekwxnDbumuYyan6D9EgUtEMmT8UIg==", + "dependencies": { + "@swc/helpers": "^0.5.0" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-stately/slider": { @@ -3848,15 +4220,26 @@ } }, "node_modules/@react-types/dialog": { - "version": "3.5.10", - "resolved": "https://registry.npmjs.org/@react-types/dialog/-/dialog-3.5.10.tgz", - "integrity": "sha512-S9ga+edOLNLZw7/zVOnZdT5T40etpzUYBXEKdFPbxyPYnERvRxJAsC1/ASuBU9fQAXMRgLZzADWV+wJoGS/X9g==", + "version": "3.5.12", + "resolved": "https://registry.npmjs.org/@react-types/dialog/-/dialog-3.5.12.tgz", + "integrity": "sha512-JmpQbSpXltqEyYfEwoqDolABIiojeExkqolHNdQlayIsfFuSxZxNwXZPOpz58Ri/iwv21JP7K3QF0Gb2Ohxl9w==", "dependencies": { - "@react-types/overlays": "^3.8.7", - "@react-types/shared": "^3.23.1" + "@react-types/overlays": "^3.8.9", + "@react-types/shared": 
"^3.24.1" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@react-types/dialog/node_modules/@react-types/overlays": { + "version": "3.8.9", + "resolved": "https://registry.npmjs.org/@react-types/overlays/-/overlays-3.8.9.tgz", + "integrity": "sha512-9ni9upQgXPnR+K9cWmbYWvm3ll9gH8P/XsEZprqIV5zNLMF334jADK48h4jafb1X9RFnj0WbHo6BqcSObzjTig==", + "dependencies": { + "@react-types/shared": "^3.24.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-types/grid": { @@ -3882,14 +4265,14 @@ } }, "node_modules/@react-types/listbox": { - "version": "3.4.9", - "resolved": "https://registry.npmjs.org/@react-types/listbox/-/listbox-3.4.9.tgz", - "integrity": "sha512-S5G+WmNKUIOPZxZ4svWwWQupP3C6LmVfnf8QQmPDvwYXGzVc0WovkqUWyhhjJirFDswTXRCO9p0yaTHHIlkdwQ==", + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/@react-types/listbox/-/listbox-3.5.1.tgz", + "integrity": "sha512-n5bOgD9lgfK1qaLtag9WPnu151SwXBCNn/OgGY/Br9mWRl+nPUEYtFcPX+2VCld7uThf54kwrTmzlFnaraIlcw==", "dependencies": { - "@react-types/shared": "^3.23.1" + "@react-types/shared": "^3.24.1" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-types/menu": { @@ -3957,25 +4340,25 @@ } }, "node_modules/@react-types/slider": { - "version": "3.7.3", - "resolved": "https://registry.npmjs.org/@react-types/slider/-/slider-3.7.3.tgz", - "integrity": "sha512-F8qFQaD2mqug2D0XeWMmjGBikiwbdERFlhFzdvNGbypPLz3AZICBKp1ZLPWdl0DMuy03G/jy6Gl4mDobl7RT2g==", + "version": "3.7.5", + "resolved": "https://registry.npmjs.org/@react-types/slider/-/slider-3.7.5.tgz", + "integrity": "sha512-bRitwQRQjQoOcKEdPMljnvm474dwrmsc6pdsVQDh/qynzr+KO9IHuYc3qPW53WVE2hMQJDohlqtCAWQXWQ5Vcg==", "dependencies": { - "@react-types/shared": "^3.23.1" + "@react-types/shared": "^3.24.1" }, 
"peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-types/switch": { - "version": "3.5.3", - "resolved": "https://registry.npmjs.org/@react-types/switch/-/switch-3.5.3.tgz", - "integrity": "sha512-Nb6+J5MrPaFa8ZNFKGMzAsen/NNzl5UG/BbC65SLGPy7O0VDa/sUpn7dcu8V2xRpRwwIN/Oso4v63bt2sgdkgA==", + "version": "3.5.5", + "resolved": "https://registry.npmjs.org/@react-types/switch/-/switch-3.5.5.tgz", + "integrity": "sha512-SZx1Bd+COhAOs/RTifbZG+uq/llwba7VAKx7XBeX4LeIz1dtguy5bigOBgFTMQi4qsIVCpybSWEEl+daj4XFPw==", "dependencies": { - "@react-types/shared": "^3.23.1" + "@react-types/shared": "^3.24.1" }, "peerDependencies": { - "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0" + "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0" } }, "node_modules/@react-types/table": { @@ -4025,9 +4408,9 @@ } }, "node_modules/@reduxjs/toolkit": { - "version": "2.2.6", - "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.2.6.tgz", - "integrity": "sha512-kH0r495c5z1t0g796eDQAkYbEQ3a1OLYN9o8jQQVZyKyw367pfRGS+qZLkHYvFHiUUdafpoSlQ2QYObIApjPWA==", + "version": "2.2.7", + "resolved": "https://registry.npmjs.org/@reduxjs/toolkit/-/toolkit-2.2.7.tgz", + "integrity": "sha512-faI3cZbSdFb8yv9dhDTmGwclW0vk0z5o1cia+kf7gCbaCwHI5e+7tP57mJUv22pNcNbeA62GSrPpfrUfdXcQ6g==", "dependencies": { "immer": "^10.0.3", "redux": "^5.0.1", @@ -4048,9 +4431,9 @@ } }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.14.1.tgz", - "integrity": "sha512-fH8/o8nSUek8ceQnT7K4EQbSiV7jgkHq81m9lWZFIXjJ7lJzpWXbQFpT/Zh6OZYnpFykvzC3fbEvEAFZu03dPA==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.19.2.tgz", + "integrity": 
"sha512-OHflWINKtoCFSpm/WmuQaWW4jeX+3Qt3XQDepkkiFTsoxFc5BpF3Z5aDxFZgBqRjO6ATP5+b1iilp4kGIZVWlA==", "cpu": [ "arm" ], @@ -4060,9 +4443,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.14.1.tgz", - "integrity": "sha512-Y/9OHLjzkunF+KGEoJr3heiD5X9OLa8sbT1lm0NYeKyaM3oMhhQFvPB0bNZYJwlq93j8Z6wSxh9+cyKQaxS7PQ==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.19.2.tgz", + "integrity": "sha512-k0OC/b14rNzMLDOE6QMBCjDRm3fQOHAL8Ldc9bxEWvMo4Ty9RY6rWmGetNTWhPo+/+FNd1lsQYRd0/1OSix36A==", "cpu": [ "arm64" ], @@ -4072,9 +4455,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.14.1.tgz", - "integrity": "sha512-+kecg3FY84WadgcuSVm6llrABOdQAEbNdnpi5X3UwWiFVhZIZvKgGrF7kmLguvxHNQy+UuRV66cLVl3S+Rkt+Q==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.19.2.tgz", + "integrity": "sha512-IIARRgWCNWMTeQH+kr/gFTHJccKzwEaI0YSvtqkEBPj7AshElFq89TyreKNFAGh5frLfDCbodnq+Ye3dqGKPBw==", "cpu": [ "arm64" ], @@ -4084,9 +4467,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.14.1.tgz", - "integrity": "sha512-2pYRzEjVqq2TB/UNv47BV/8vQiXkFGVmPFwJb+1E0IFFZbIX8/jo1olxqqMbo6xCXf8kabANhp5bzCij2tFLUA==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.19.2.tgz", + "integrity": "sha512-52udDMFDv54BTAdnw+KXNF45QCvcJOcYGl3vQkp4vARyrcdI/cXH8VXTEv/8QWfd6Fru8QQuw1b2uNersXOL0g==", "cpu": [ "x64" ], @@ -4096,9 +4479,21 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.14.1", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.14.1.tgz", - "integrity": "sha512-mS6wQ6Do6/wmrF9aTFVpIJ3/IDXhg1EZcQFYHZLHqw6AzMBjTHWnCG35HxSqUNphh0EHqSM6wRTT8HsL1C0x5g==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.19.2.tgz", + "integrity": "sha512-r+SI2t8srMPYZeoa1w0o/AfoVt9akI1ihgazGYPQGRilVAkuzMGiTtexNZkrPkQsyFrvqq/ni8f3zOnHw4hUbA==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.19.2.tgz", + "integrity": "sha512-+tYiL4QVjtI3KliKBGtUU7yhw0GMcJJuB9mLTCEauHEsqfk49gtUBXGtGP3h1LW8MbaTY6rSFIQV1XOBps1gBA==", "cpu": [ "arm" ], @@ -4108,9 +4503,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.14.1.tgz", - "integrity": "sha512-p9rGKYkHdFMzhckOTFubfxgyIO1vw//7IIjBBRVzyZebWlzRLeNhqxuSaZ7kCEKVkm/kuC9fVRW9HkC/zNRG2w==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.19.2.tgz", + "integrity": "sha512-OR5DcvZiYN75mXDNQQxlQPTv4D+uNCUsmSCSY2FolLf9W5I4DSoJyg7z9Ea3TjKfhPSGgMJiey1aWvlWuBzMtg==", "cpu": [ "arm64" ], @@ -4120,9 +4515,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.14.1.tgz", - "integrity": "sha512-nDY6Yz5xS/Y4M2i9JLQd3Rofh5OR8Bn8qe3Mv/qCVpHFlwtZSBYSPaU4mrGazWkXrdQ98GB//H0BirGR/SKFSw==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.19.2.tgz", + "integrity": 
"sha512-Hw3jSfWdUSauEYFBSFIte6I8m6jOj+3vifLg8EU3lreWulAUpch4JBjDMtlKosrBzkr0kwKgL9iCfjA8L3geoA==", "cpu": [ "arm64" ], @@ -4132,11 +4527,11 @@ ] }, "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.14.1.tgz", - "integrity": "sha512-im7HE4VBL+aDswvcmfx88Mp1soqL9OBsdDBU8NqDEYtkri0qV0THhQsvZtZeNNlLeCUQ16PZyv7cqutjDF35qw==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.19.2.tgz", + "integrity": "sha512-rhjvoPBhBwVnJRq/+hi2Q3EMiVF538/o9dBuj9TVLclo9DuONqt5xfWSaE6MYiFKpo/lFPJ/iSI72rYWw5Hc7w==", "cpu": [ - "ppc64le" + "ppc64" ], "optional": true, "os": [ @@ -4144,9 +4539,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.14.1.tgz", - "integrity": "sha512-RWdiHuAxWmzPJgaHJdpvUUlDz8sdQz4P2uv367T2JocdDa98iRw2UjIJ4QxSyt077mXZT2X6pKfT2iYtVEvOFw==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.19.2.tgz", + "integrity": "sha512-EAz6vjPwHHs2qOCnpQkw4xs14XJq84I81sDRGPEjKPFVPBw7fwvtwhVjcZR6SLydCv8zNK8YGFblKWd/vRmP8g==", "cpu": [ "riscv64" ], @@ -4156,9 +4551,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.14.1.tgz", - "integrity": "sha512-VMgaGQ5zRX6ZqV/fas65/sUGc9cPmsntq2FiGmayW9KMNfWVG/j0BAqImvU4KTeOOgYSf1F+k6at1UfNONuNjA==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.19.2.tgz", + "integrity": "sha512-IJSUX1xb8k/zN9j2I7B5Re6B0NNJDJ1+soezjNojhT8DEVeDNptq2jgycCOpRhyGj0+xBn7Cq+PK7Q+nd2hxLA==", "cpu": [ "s390x" ], @@ -4168,9 +4563,9 
@@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.14.1.tgz", - "integrity": "sha512-9Q7DGjZN+hTdJomaQ3Iub4m6VPu1r94bmK2z3UeWP3dGUecRC54tmVu9vKHTm1bOt3ASoYtEz6JSRLFzrysKlA==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.19.2.tgz", + "integrity": "sha512-OgaToJ8jSxTpgGkZSkwKE+JQGihdcaqnyHEFOSAU45utQ+yLruE1dkonB2SDI8t375wOKgNn8pQvaWY9kPzxDQ==", "cpu": [ "x64" ], @@ -4180,9 +4575,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.14.1.tgz", - "integrity": "sha512-JNEG/Ti55413SsreTguSx0LOVKX902OfXIKVg+TCXO6Gjans/k9O6ww9q3oLGjNDaTLxM+IHFMeXy/0RXL5R/g==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.19.2.tgz", + "integrity": "sha512-5V3mPpWkB066XZZBgSd1lwozBk7tmOkKtquyCJ6T4LN3mzKENXyBwWNQn8d0Ci81hvlBw5RoFgleVpL6aScLYg==", "cpu": [ "x64" ], @@ -4192,9 +4587,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.14.1.tgz", - "integrity": "sha512-ryS22I9y0mumlLNwDFYZRDFLwWh3aKaC72CWjFcFvxK0U6v/mOkM5Up1bTbCRAhv3kEIwW2ajROegCIQViUCeA==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.19.2.tgz", + "integrity": "sha512-ayVstadfLeeXI9zUPiKRVT8qF55hm7hKa+0N1V6Vj+OTNFfKSoUxyZvzVvgtBxqSb5URQ8sK6fhwxr9/MLmxdA==", "cpu": [ "arm64" ], @@ -4204,9 +4599,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.14.1.tgz", - "integrity": 
"sha512-TdloItiGk+T0mTxKx7Hp279xy30LspMso+GzQvV2maYePMAWdmrzqSNZhUpPj3CGw12aGj57I026PgLCTu8CGg==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.19.2.tgz", + "integrity": "sha512-Mda7iG4fOLHNsPqjWSjANvNZYoW034yxgrndof0DwCy0D3FvTjeNo+HGE6oGWgvcLZNLlcp0hLEFcRs+UGsMLg==", "cpu": [ "ia32" ], @@ -4216,9 +4611,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.14.1.tgz", - "integrity": "sha512-wQGI+LY/Py20zdUPq+XCem7JcPOyzIJBm3dli+56DJsQOHbnXZFEwgmnC6el1TPAfC8lBT3m+z69RmLykNUbew==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.19.2.tgz", + "integrity": "sha512-DPi0ubYhSow/00YqmG1jWm3qt1F8aXziHc/UNy8bo9cpCacqhuWu+iSq/fp2SyEQK7iYTZ60fBU9cat3MXTjIQ==", "cpu": [ "x64" ], @@ -4242,9 +4637,9 @@ } }, "node_modules/@tailwindcss/typography": { - "version": "0.5.13", - "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.13.tgz", - "integrity": "sha512-ADGcJ8dX21dVVHIwTRgzrcunY6YY9uSlAHHGVKvkA+vLc5qLwEszvKts40lx7z0qc4clpjclwLeK5rVCV2P/uw==", + "version": "0.5.14", + "resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.14.tgz", + "integrity": "sha512-ZvOCjUbsJBjL9CxQBn+VEnFpouzuKhxh2dH8xMIWHILL+HfOYtlAkWcyoon8LlzE53d2Yo6YO6pahKKNW3q1YQ==", "dev": true, "dependencies": { "lodash.castarray": "^4.4.0", @@ -4256,23 +4651,10 @@ "tailwindcss": ">=3.0.0 || insiders" } }, - "node_modules/@tailwindcss/typography/node_modules/postcss-selector-parser": { - "version": "6.0.10", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", - "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", - "dev": true, - "dependencies": { - "cssesc": "^3.0.0", - 
"util-deprecate": "^1.0.2" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/@testing-library/dom": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.1.0.tgz", - "integrity": "sha512-wdsYKy5zupPyLCW2Je5DLHSxSfbIp6h80WoHOQc+RPtmPGA52O9x5MJEkv92Sjonpq+poOAtUKhh1kBGAXBrNA==", + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.0.tgz", + "integrity": "sha512-pemlzrSESWbdAloYml3bAJMEfNh1Z7EduzqPKprCH5S341frlpYnUEW0H72dLxa6IsYr+mPno20GiSm+h9dEdQ==", "dev": true, "peer": true, "dependencies": { @@ -4406,9 +4788,9 @@ } }, "node_modules/@types/babel__traverse": { - "version": "7.20.5", - "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.5.tgz", - "integrity": "sha512-WXCyOcRtH37HAUkpXhUduaxdm82b4GSlyTqajXviN4EfiuPgNYR109xMCKvpl6zPIpua0DGlMEDCq+g8EdoheQ==", + "version": "7.20.6", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.20.6.tgz", + "integrity": "sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==", "dependencies": { "@babel/types": "^7.20.7" } @@ -4435,11 +4817,11 @@ } }, "node_modules/@types/hast": { - "version": "2.3.10", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz", - "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==", + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", "dependencies": { - "@types/unist": "^2" + "@types/unist": "*" } }, "node_modules/@types/json5": { @@ -4462,9 +4844,9 @@ } }, "node_modules/@types/mdast": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.3.tgz", - "integrity": 
"sha512-LsjtqsyF+d2/yFOYaN22dHZI1Cpwkrj+g06G8+qtUKlhovPW89YhqSnfKtMbkgmEtYpH2gydRNULd6y8mciAFg==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", "dependencies": { "@types/unist": "*" } @@ -4475,12 +4857,12 @@ "integrity": "sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g==" }, "node_modules/@types/node": { - "version": "20.14.12", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.12.tgz", - "integrity": "sha512-r7wNXakLeSsGT0H1AU863vS2wa5wBOK4bWMjZz2wj+8nBx+m5PeIn0k8AloSLpRuiwdRQZwarZqHE4FNArPuJQ==", + "version": "22.1.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.1.0.tgz", + "integrity": "sha512-AOmuRF0R2/5j1knA3c6G3HOk523Ga+l+ZXltX8SF1+5oqcXijjfTd8fY3XRZqSihEu9XhtQnKYLmkFaoxgsJHw==", "devOptional": true, "dependencies": { - "undici-types": "~5.26.4" + "undici-types": "~6.13.0" } }, "node_modules/@types/prop-types": { @@ -4525,9 +4907,9 @@ } }, "node_modules/@types/unist": { - "version": "2.0.10", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz", - "integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==" + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", + "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" }, "node_modules/@types/use-sync-external-store": { "version": "0.0.3", @@ -4535,16 +4917,16 @@ "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.17.0.tgz", - "integrity": 
"sha512-pyiDhEuLM3PuANxH7uNYan1AaFs5XE0zw1hq69JBvGvE7gSuEoQl1ydtEe/XQeoC3GQxLXyOVa5kNOATgM638A==", + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.18.0.tgz", + "integrity": "sha512-94EQTWZ40mzBc42ATNIBimBEDltSJ9RQHCC8vc/PDbxi4k8dVwUAv4o98dk50M1zB+JGFxp43FP7f8+FP8R6Sw==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "7.17.0", - "@typescript-eslint/type-utils": "7.17.0", - "@typescript-eslint/utils": "7.17.0", - "@typescript-eslint/visitor-keys": "7.17.0", + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/type-utils": "7.18.0", + "@typescript-eslint/utils": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", @@ -4568,15 +4950,15 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.17.0.tgz", - "integrity": "sha512-puiYfGeg5Ydop8eusb/Hy1k7QmOU6X3nvsqCgzrB2K4qMavK//21+PzNE8qeECgNOIoertJPUC1SpegHDI515A==", + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.18.0.tgz", + "integrity": "sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "7.17.0", - "@typescript-eslint/types": "7.17.0", - "@typescript-eslint/typescript-estree": "7.17.0", - "@typescript-eslint/visitor-keys": "7.17.0", + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/typescript-estree": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", "debug": "^4.3.4" }, "engines": { @@ -4596,13 +4978,13 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "7.17.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.17.0.tgz", - "integrity": "sha512-0P2jTTqyxWp9HiKLu/Vemr2Rg1Xb5B7uHItdVZ6iAenXmPo4SZ86yOPCJwMqpCyaMiEHTNqizHfsbmCFT1x9SA==", + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.18.0.tgz", + "integrity": "sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.17.0", - "@typescript-eslint/visitor-keys": "7.17.0" + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -4613,13 +4995,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.17.0.tgz", - "integrity": "sha512-XD3aaBt+orgkM/7Cei0XNEm1vwUxQ958AOLALzPlbPqb8C1G8PZK85tND7Jpe69Wualri81PLU+Zc48GVKIMMA==", + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.18.0.tgz", + "integrity": "sha512-XL0FJXuCLaDuX2sYqZUUSOJ2sG5/i1AAze+axqmLnSkNEVMVYLF+cbwlB2w8D1tinFuSikHmFta+P+HOofrLeA==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "7.17.0", - "@typescript-eslint/utils": "7.17.0", + "@typescript-eslint/typescript-estree": "7.18.0", + "@typescript-eslint/utils": "7.18.0", "debug": "^4.3.4", "ts-api-utils": "^1.3.0" }, @@ -4640,9 +5022,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.17.0.tgz", - "integrity": "sha512-a29Ir0EbyKTKHnZWbNsrc/gqfIBqYPwj3F2M+jWE/9bqfEHg0AMtXzkbUkOG6QgEScxh2+Pz9OXe11jHDnHR7A==", + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.18.0.tgz", + "integrity": 
"sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ==", "dev": true, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -4653,13 +5035,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.17.0.tgz", - "integrity": "sha512-72I3TGq93t2GoSBWI093wmKo0n6/b7O4j9o8U+f65TVD0FS6bI2180X5eGEr8MA8PhKMvYe9myZJquUT2JkCZw==", + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.18.0.tgz", + "integrity": "sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.17.0", - "@typescript-eslint/visitor-keys": "7.17.0", + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -4681,15 +5063,15 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.17.0.tgz", - "integrity": "sha512-r+JFlm5NdB+JXc7aWWZ3fKSm1gn0pkswEwIYsrGPdsT2GjsRATAKXiNtp3vgAAO1xZhX8alIOEQnNMl3kbTgJw==", + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.18.0.tgz", + "integrity": "sha512-kK0/rNa2j74XuHVcoCZxdFBMF+aq/vH83CXAOHieC+2Gis4mF8jJXT5eAfyD3K0sAxtPuwxaIOIOvhwzVDt/kw==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "7.17.0", - "@typescript-eslint/types": "7.17.0", - "@typescript-eslint/typescript-estree": "7.17.0" + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/typescript-estree": "7.18.0" }, "engines": { "node": "^18.18.0 || >=20.0.0" @@ -4703,12 +5085,12 @@ } }, 
"node_modules/@typescript-eslint/visitor-keys": { - "version": "7.17.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.17.0.tgz", - "integrity": "sha512-RVGC9UhPOCsfCdI9pU++K4nD7to+jTcMIbXTSOcrLqUEW6gF2pU1UUbYJKc9cvcRSK1UDeMJ7pdMxf4bhMpV/A==", + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.18.0.tgz", + "integrity": "sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg==", "dev": true, "dependencies": { - "@typescript-eslint/types": "7.17.0", + "@typescript-eslint/types": "7.18.0", "eslint-visitor-keys": "^3.4.3" }, "engines": { @@ -4769,20 +5151,6 @@ "vitest": "1.6.0" } }, - "node_modules/@vitest/coverage-v8/node_modules/istanbul-lib-source-maps": { - "version": "5.0.6", - "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz", - "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==", - "dev": true, - "dependencies": { - "@jridgewell/trace-mapping": "^0.3.23", - "debug": "^4.1.1", - "istanbul-lib-coverage": "^3.0.0" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/@vitest/expect": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.6.0.tgz", @@ -4827,9 +5195,9 @@ } }, "node_modules/@vitest/runner/node_modules/yocto-queue": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", - "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.1.1.tgz", + "integrity": "sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g==", "dev": true, "engines": { "node": ">=12.20" @@ -4957,9 +5325,9 @@ "integrity": 
"sha512-hqJHYaQb5OptNunnyAnkHyM8aCjZ1MEIDTQu1iIbbTD/xops91NB5yq1ZK/dC2JDbVWtF23zUtl9JE2NqwT87A==" }, "node_modules/acorn": { - "version": "8.11.3", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", - "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "version": "8.12.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz", + "integrity": "sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -4978,10 +5346,13 @@ } }, "node_modules/acorn-walk": { - "version": "8.3.2", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz", - "integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==", + "version": "8.3.3", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.3.tgz", + "integrity": "sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw==", "dev": true, + "dependencies": { + "acorn": "^8.11.0" + }, "engines": { "node": ">=0.4.0" } @@ -5014,6 +5385,21 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ansi-escapes": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.0.0.tgz", + "integrity": "sha512-GdYO7a61mR0fOlAsvC9/rIHf7L96sBc6dEWzeOu+KAea5bZyQRPIpojrVoI4AXGJS/ycu/fBTdLrUkA4ODrvjw==", + "dev": true, + "dependencies": { + "environment": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", @@ -5254,9 +5640,9 @@ "dev": true }, "node_modules/autoprefixer": { - "version": "10.4.19", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.19.tgz", - "integrity": 
"sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==", + "version": "10.4.20", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", + "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==", "dev": true, "funding": [ { @@ -5273,11 +5659,11 @@ } ], "dependencies": { - "browserslist": "^4.23.0", - "caniuse-lite": "^1.0.30001599", + "browserslist": "^4.23.3", + "caniuse-lite": "^1.0.30001646", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", - "picocolors": "^1.0.0", + "picocolors": "^1.0.1", "postcss-value-parser": "^4.2.0" }, "bin": { @@ -5306,9 +5692,9 @@ } }, "node_modules/axe-core": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.9.1.tgz", - "integrity": "sha512-QbUdXJVTpvUTHU7871ppZkdOLBeGUKBQWHkHrvN2V9IQWGMt61zf3B45BtzjxEJzYuj0JBjBZP/hmYS/R9pmAw==", + "version": "4.10.0", + "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.0.tgz", + "integrity": "sha512-Mr2ZakwQ7XUAjp7pAwQWRhhK8mQQ6JAaNWSjmjxil0R8BPioMtQsTLOolGYkji1rcL++3dCqZA3zWqpT+9Ew6g==", "dev": true, "engines": { "node": ">=4" @@ -5368,9 +5754,9 @@ } }, "node_modules/browserslist": { - "version": "4.23.0", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.0.tgz", - "integrity": "sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ==", + "version": "4.23.3", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz", + "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==", "funding": [ { "type": "opencollective", @@ -5386,10 +5772,10 @@ } ], "dependencies": { - "caniuse-lite": "^1.0.30001587", - "electron-to-chromium": "^1.4.668", - "node-releases": "^2.0.14", - "update-browserslist-db": "^1.0.13" + "caniuse-lite": "^1.0.30001646", + "electron-to-chromium": "^1.5.4", + 
"node-releases": "^2.0.18", + "update-browserslist-db": "^1.1.0" }, "bin": { "browserslist": "cli.js" @@ -5444,9 +5830,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001607", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001607.tgz", - "integrity": "sha512-WcvhVRjXLKFB/kmOFVwELtMxyhq3iM/MvmXcyCe2PNf166c39mptscOc/45TTS96n2gpNV2z7+NakArTWZCQ3w==", + "version": "1.0.30001646", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001646.tgz", + "integrity": "sha512-dRg00gudiBDDTmUhClSdv3hqRfpbOnU28IpI1T6PBTLWa+kOj0681C8uML3PifYfREuBrVjDGhL3adYpBT6spw==", "funding": [ { "type": "opencollective", @@ -5472,9 +5858,9 @@ } }, "node_modules/chai": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.4.1.tgz", - "integrity": "sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.5.0.tgz", + "integrity": "sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw==", "dev": true, "dependencies": { "assertion-error": "^1.1.0", @@ -5483,7 +5869,7 @@ "get-func-name": "^2.0.2", "loupe": "^2.3.6", "pathval": "^1.1.1", - "type-detect": "^4.0.8" + "type-detect": "^4.1.0" }, "engines": { "node": ">=4" @@ -5506,9 +5892,9 @@ } }, "node_modules/character-entities": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", - "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -5524,18 +5910,18 @@ } }, 
"node_modules/character-entities-legacy": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", - "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" } }, "node_modules/character-reference-invalid": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", - "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -5588,15 +5974,15 @@ } }, "node_modules/cli-cursor": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-4.0.0.tgz", - "integrity": "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==", + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz", + "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", "dev": true, "dependencies": { - "restore-cursor": "^4.0.0" + "restore-cursor": "^5.0.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -5687,9 +6073,9 @@ } }, 
"node_modules/comma-separated-tokens": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", - "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -5715,6 +6101,12 @@ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "dev": true }, + "node_modules/confbox": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.1.7.tgz", + "integrity": "sha512-uJcB/FKZtBMCJpK8MQji6bJHgu1tixKPxRLeGkNzBoOZzpnZUJm0jm2/sBDWcuBx1dYgxV4JU+g5hmNxCyAmdA==", + "dev": true + }, "node_modules/confusing-browser-globals": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.11.tgz", @@ -5776,6 +6168,12 @@ "node": ">=18" } }, + "node_modules/cssstyle/node_modules/rrweb-cssom": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.6.0.tgz", + "integrity": "sha512-APM0Gt1KoXBz0iIkkdB/kfvGOwC4UuJFeG/c+yV7wSc7q96cG/kJ0HiYCnzivD9SB53cLV1MlHFNfOuPaadYSw==", + "dev": true + }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -5852,9 +6250,9 @@ } }, "node_modules/debug": { - "version": "4.3.4", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", - "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.6.tgz", + "integrity": 
"sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg==", "dependencies": { "ms": "2.1.2" }, @@ -5885,19 +6283,10 @@ "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/decode-named-character-reference/node_modules/character-entities": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", - "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, "node_modules/deep-eql": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.3.tgz", - "integrity": "sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==", + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-4.1.4.tgz", + "integrity": "sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg==", "dev": true, "dependencies": { "type-detect": "^4.0.0" @@ -6076,9 +6465,9 @@ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==" }, "node_modules/electron-to-chromium": { - "version": "1.4.729", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.4.729.tgz", - "integrity": "sha512-bx7+5Saea/qu14kmPTDHQxkp2UnziG3iajUQu3BxFvCOnpAJdDbMV4rSl+EqFDkkpNNVUFlR1kDfpL59xfy1HA==" + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.4.tgz", + "integrity": "sha512-orzA81VqLyIGUEA77YkVA1D+N+nNfl2isJVjjmOyrlxuooZ19ynb+dOlaDTqd/idKRS9lDCSBmtzM+kyCsMnkA==" }, "node_modules/emoji-regex": { "version": "9.2.2", @@ -6097,6 +6486,18 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, + "node_modules/environment": { + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/environment/-/environment-1.1.0.tgz", + "integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==", + "dev": true, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/es-abstract": { "version": "1.23.3", "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.23.3.tgz", @@ -6337,7 +6738,6 @@ "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", "dev": true, - "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", @@ -6852,18 +7252,6 @@ "node": "*" } }, - "node_modules/eslint/node_modules/type-fest": { - "version": "0.20.2", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", - "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/espree": { "version": "9.6.1", "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", @@ -6882,9 +7270,9 @@ } }, "node_modules/esquery": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.5.0.tgz", - "integrity": "sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==", + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", "dev": true, "dependencies": { "estraverse": "^5.1.0" @@ -6947,6 +7335,29 @@ "integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==", "dev": true }, + 
"node_modules/execa": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", + "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", + "dev": true, + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^8.0.1", + "human-signals": "^5.0.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, "node_modules/extend": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", @@ -7099,9 +7510,9 @@ } }, "node_modules/foreground-child": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz", - "integrity": "sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==", + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.2.1.tgz", + "integrity": "sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==", "dependencies": { "cross-spawn": "^7.0.0", "signal-exit": "^4.0.1" @@ -7113,17 +7524,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/foreground-child/node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/form-data": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", @@ -7160,9 +7560,9 @@ } }, "node_modules/framer-motion": { - "version": "11.3.8", - 
"resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-11.3.8.tgz", - "integrity": "sha512-1D+RDTsIp4Rz2dq/oToqSEc9idEQwgBRQyBq4rGpFba+0Z+GCbj9z1s0+ikFbanWe3YJ0SqkNlDe08GcpFGj5A==", + "version": "11.3.21", + "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-11.3.21.tgz", + "integrity": "sha512-D+hfIsvzV8eL/iycld4K+tKlg2Q2LdwnrcBEohtGw3cG1AIuNYATbT5RUqIM1ndsAk+EfGhoSGf0UaiFodc5Tw==", "peer": true, "dependencies": { "tslib": "^2.4.0" @@ -7294,6 +7694,18 @@ "node": ">=6" } }, + "node_modules/get-stream": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", + "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", + "dev": true, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/get-symbol-description": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/get-symbol-description/-/get-symbol-description-1.0.2.tgz", @@ -7315,6 +7727,7 @@ "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", "dev": true, "dependencies": { "fs.realpath": "^1.0.0", @@ -7373,12 +7786,13 @@ } }, "node_modules/globalthis": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.3.tgz", - "integrity": "sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz", + "integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==", "dev": true, "dependencies": { - "define-properties": "^1.1.3" + "define-properties": "^1.2.1", + "gopd": "^1.0.1" }, 
"engines": { "node": ">= 0.4" @@ -7554,46 +7968,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/hast-util-to-jsx-runtime/node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, - "node_modules/hast-util-to-jsx-runtime/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, - "node_modules/hast-util-to-jsx-runtime/node_modules/comma-separated-tokens": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", - "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/hast-util-to-jsx-runtime/node_modules/property-information": { - "version": "6.5.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", - "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/hast-util-to-jsx-runtime/node_modules/space-separated-tokens": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", - "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, "node_modules/hast-util-whitespace": { "version": "3.0.0", 
"resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", @@ -7606,14 +7980,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/hast-util-whitespace/node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, "node_modules/hastscript": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", @@ -7630,6 +7996,49 @@ "url": "https://opencollective.com/unified" } }, + "node_modules/hastscript/node_modules/@types/hast": { + "version": "2.3.10", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz", + "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/hastscript/node_modules/@types/unist": { + "version": "2.0.10", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz", + "integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==" + }, + "node_modules/hastscript/node_modules/comma-separated-tokens": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", + "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hastscript/node_modules/property-information": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", + "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "dependencies": { + 
"xtend": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/hastscript/node_modules/space-separated-tokens": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", + "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/highlight.js": { "version": "10.7.3", "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", @@ -7699,10 +8108,19 @@ "node": ">= 14" } }, + "node_modules/human-signals": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", + "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", + "dev": true, + "engines": { + "node": ">=16.17.0" + } + }, "node_modules/husky": { - "version": "9.1.2", - "resolved": "https://registry.npmjs.org/husky/-/husky-9.1.2.tgz", - "integrity": "sha512-1/aDMXZdhr1VdJJTLt6e7BipM0Jd9qkpubPiIplon1WmCeOy3nnzsCMeBqS9AsL5ioonl8F8y/F2CLOmk19/Pw==", + "version": "9.1.4", + "resolved": "https://registry.npmjs.org/husky/-/husky-9.1.4.tgz", + "integrity": "sha512-bho94YyReb4JV7LYWRWxZ/xr6TtOTt8cMfmQ39MQYJ7f/YE268s3GdghGwi+y4zAeqewE5zYLvuhV0M0ijsDEA==", "dev": true, "bin": { "husky": "bin.js" @@ -7774,9 +8192,9 @@ } }, "node_modules/immer": { - "version": "10.0.4", - "resolved": "https://registry.npmjs.org/immer/-/immer-10.0.4.tgz", - "integrity": "sha512-cuBuGK40P/sk5IzWa9QPUaAdvPHjkk1c+xYsd9oZw+YQQEV+10G0P5uMpGctZZKnyQ+ibRO08bD25nWLmYi2pw==", + "version": "10.1.1", + "resolved": "https://registry.npmjs.org/immer/-/immer-10.1.1.tgz", + "integrity": "sha512-s2MPrmjovJcoMaHtx6K11Ra7oD05NT97w1IC5zpMkT6Atjr7H8LjaDd81iIxUYpMKSRRNMJE703M1Fhr/TctHw==", "funding": { "type": "opencollective", "url": 
"https://opencollective.com/immer" @@ -7820,6 +8238,7 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", "dev": true, "dependencies": { "once": "^1.3.0", @@ -7871,21 +8290,21 @@ } }, "node_modules/is-alphabetical": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", - "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" } }, "node_modules/is-alphanumerical": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", - "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", "dependencies": { - "is-alphabetical": "^1.0.0", - "is-decimal": "^1.0.0" + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" }, "funding": { "type": "github", @@ -7924,6 +8343,11 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-arrayish": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", + 
"integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" + }, "node_modules/is-async-function": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-async-function/-/is-async-function-2.0.0.tgz", @@ -7991,11 +8415,14 @@ } }, "node_modules/is-core-module": { - "version": "2.13.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz", - "integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==", + "version": "2.15.0", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.15.0.tgz", + "integrity": "sha512-Dd+Lb2/zvk9SKy1TGCt1wFJFo/MWBPMX5x7KcvLajWTGuomczdQX61PvY5yK6SVACwpoexWo81IfFyoKY2QnTA==", "dependencies": { - "hasown": "^2.0.0" + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" }, "funding": { "url": "https://github.com/sponsors/ljharb" @@ -8032,9 +8459,9 @@ } }, "node_modules/is-decimal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", - "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -8099,9 +8526,9 @@ } }, "node_modules/is-hexadecimal": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", - "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", 
"funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -8223,6 +8650,18 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", + "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", + "dev": true, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-string": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/is-string/-/is-string-1.0.7.tgz", @@ -8342,6 +8781,20 @@ "node": ">=10" } }, + "node_modules/istanbul-lib-source-maps": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz", + "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==", + "dev": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.23", + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/istanbul-reports": { "version": "3.1.7", "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.1.7.tgz", @@ -8369,15 +8822,12 @@ } }, "node_modules/jackspeak": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-2.3.6.tgz", - "integrity": "sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==", + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", "dependencies": { "@isaacs/cliui": "^8.0.2" }, - "engines": { - "node": ">=14" - }, "funding": { "url": "https://github.com/sponsors/isaacs" }, @@ -8386,9 +8836,9 @@ } }, "node_modules/jiti": { - 
"version": "1.21.0", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.0.tgz", - "integrity": "sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==", + "version": "1.21.6", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz", + "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==", "bin": { "jiti": "bin/jiti.js" } @@ -8458,12 +8908,6 @@ } } }, - "node_modules/jsdom/node_modules/rrweb-cssom": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.7.1.tgz", - "integrity": "sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==", - "dev": true - }, "node_modules/jsesc": { "version": "2.5.2", "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz", @@ -8504,12 +8948,6 @@ "node": ">=6" } }, - "node_modules/jsonc-parser": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.1.tgz", - "integrity": "sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA==", - "dev": true - }, "node_modules/jsx-ast-utils": { "version": "3.3.5", "resolved": "https://registry.npmjs.org/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz", @@ -8535,9 +8973,9 @@ } }, "node_modules/language-subtag-registry": { - "version": "0.3.22", - "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.22.tgz", - "integrity": "sha512-tN0MCzyWnoz/4nHS6uxdlFWoUZT7ABptwKPQ52Ea7URk6vll88bWBVhodtnlfEuCcKWNGoc+uGbw1cwa9IKh/w==", + "version": "0.3.23", + "resolved": "https://registry.npmjs.org/language-subtag-registry/-/language-subtag-registry-0.3.23.tgz", + "integrity": "sha512-0K65Lea881pHotoGEa5gDlMxt3pctLi2RplBb7Ezh4rRdLEOtgi7n4EwK9lamnUCkKBqaeKRVebTq6BAxSkpXQ==", "dev": true }, "node_modules/language-tags": { @@ -8566,9 +9004,9 @@ } }, "node_modules/lilconfig": { - "version": "3.1.1", - 
"resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.1.tgz", - "integrity": "sha512-O18pf7nyvHTckunPWCV1XUNXU1piu01y2b7ATJ0ppkUkk8ocqVWBrYjJBCwHDjD/ZWcfyrA0P4gKhzWGi5EINQ==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz", + "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==", "engines": { "node": ">=14" }, @@ -8582,21 +9020,21 @@ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==" }, "node_modules/lint-staged": { - "version": "15.2.7", - "resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-15.2.7.tgz", - "integrity": "sha512-+FdVbbCZ+yoh7E/RosSdqKJyUM2OEjTciH0TFNkawKgvFp1zbGlEC39RADg+xKBG1R4mhoH2j85myBQZ5wR+lw==", + "version": "15.2.8", + "resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-15.2.8.tgz", + "integrity": "sha512-PUWFf2zQzsd9EFU+kM1d7UP+AZDbKFKuj+9JNVTBkhUFhbg4MAt6WfyMMwBfM4lYqd4D2Jwac5iuTu9rVj4zCQ==", "dev": true, "dependencies": { "chalk": "~5.3.0", "commander": "~12.1.0", - "debug": "~4.3.4", + "debug": "~4.3.6", "execa": "~8.0.1", - "lilconfig": "~3.1.1", - "listr2": "~8.2.1", + "lilconfig": "~3.1.2", + "listr2": "~8.2.4", "micromatch": "~4.0.7", "pidtree": "~0.6.0", "string-argv": "~0.3.2", - "yaml": "~2.4.2" + "yaml": "~2.5.0" }, "bin": { "lint-staged": "bin/lint-staged.js" @@ -8620,151 +9058,17 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/lint-staged/node_modules/execa": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", - "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", - "dev": true, - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^8.0.1", - "human-signals": "^5.0.0", - "is-stream": "^3.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^5.1.0", - "onetime": "^6.0.0", - "signal-exit": 
"^4.1.0", - "strip-final-newline": "^3.0.0" - }, - "engines": { - "node": ">=16.17" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/lint-staged/node_modules/get-stream": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", - "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", - "dev": true, - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lint-staged/node_modules/human-signals": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", - "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", - "dev": true, - "engines": { - "node": ">=16.17.0" - } - }, - "node_modules/lint-staged/node_modules/is-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", - "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", - "dev": true, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lint-staged/node_modules/mimic-fn": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", - "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lint-staged/node_modules/npm-run-path": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", - "integrity": 
"sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", - "dev": true, - "dependencies": { - "path-key": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lint-staged/node_modules/onetime": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", - "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", - "dev": true, - "dependencies": { - "mimic-fn": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lint-staged/node_modules/path-key": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/lint-staged/node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "dev": true, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/lint-staged/node_modules/strip-final-newline": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", - "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/listr2": { - "version": "8.2.1", 
- "resolved": "https://registry.npmjs.org/listr2/-/listr2-8.2.1.tgz", - "integrity": "sha512-irTfvpib/rNiD637xeevjO2l3Z5loZmuaRi0L0YE5LfijwVY96oyVn0DFD3o/teAok7nfobMG1THvvcHh/BP6g==", + "version": "8.2.4", + "resolved": "https://registry.npmjs.org/listr2/-/listr2-8.2.4.tgz", + "integrity": "sha512-opevsywziHd3zHCVQGAj8zu+Z3yHNkkoYhWIGnq54RrCVwLz0MozotJEDnKsIBLvkfLGN6BLOyAeRrYI0pKA4g==", "dev": true, "dependencies": { "cli-truncate": "^4.0.0", "colorette": "^2.0.20", "eventemitter3": "^5.0.1", - "log-update": "^6.0.0", - "rfdc": "^1.3.1", + "log-update": "^6.1.0", + "rfdc": "^1.4.1", "wrap-ansi": "^9.0.0" }, "engines": { @@ -8857,14 +9161,14 @@ "integrity": "sha512-XeqSp49hNGmlkj2EJlfrQFIzQ6lXdNro9sddtQzcJY8QaoC2GO0DT7xaIokHeyM+mIT0mPMlPvkYzg2xCuHdZg==" }, "node_modules/log-update": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/log-update/-/log-update-6.0.0.tgz", - "integrity": "sha512-niTvB4gqvtof056rRIrTZvjNYE4rCUzO6X/X+kYjd7WFxXeJ0NwEFnRxX6ehkvv3jTwrXnNdtAak5XYZuIyPFw==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/log-update/-/log-update-6.1.0.tgz", + "integrity": "sha512-9ie8ItPR6tjY5uYJh8K/Zrv/RMZ5VOlOWvtZdEHYSTFKZfIBPQa9tOAEeAWhd+AnIneLJ22w5fjOYtoutpWq5w==", "dev": true, "dependencies": { - "ansi-escapes": "^6.2.0", - "cli-cursor": "^4.0.0", - "slice-ansi": "^7.0.0", + "ansi-escapes": "^7.0.0", + "cli-cursor": "^5.0.0", + "slice-ansi": "^7.1.0", "strip-ansi": "^7.1.0", "wrap-ansi": "^9.0.0" }, @@ -8875,18 +9179,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/log-update/node_modules/ansi-escapes": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-6.2.1.tgz", - "integrity": "sha512-4nJ3yixlEthEJ9Rk4vPcdBRkZvQZlYyu8j4/Mqz5sgIkddmEnH2Yj2ZrnP9S3tQOvSNRUIgVNF/1yPpRAGNRig==", - "dev": true, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/log-update/node_modules/ansi-regex": { 
"version": "6.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", @@ -9018,12 +9310,12 @@ } }, "node_modules/magic-string": { - "version": "0.30.10", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.10.tgz", - "integrity": "sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==", + "version": "0.30.11", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.11.tgz", + "integrity": "sha512-+Wri9p0QHMy+545hKww7YAu5NyzF8iomPL/RQazugQ9+Ez4Ic3mERMd8ZTX5rfK944j+560ZJi8iAwgak1Ac7A==", "dev": true, "dependencies": { - "@jridgewell/sourcemap-codec": "^1.4.15" + "@jridgewell/sourcemap-codec": "^1.5.0" } }, "node_modules/magicast": { @@ -9053,9 +9345,9 @@ } }, "node_modules/mdast-util-from-markdown": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.0.tgz", - "integrity": "sha512-n7MTOr/z+8NAX/wmhhDji8O3bRvPTV/U0oTCaZJkjhPSKTPhS3xufVhKGF8s1pJ7Ox4QgoIU7KHseh09S+9rTA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.1.tgz", + "integrity": "sha512-aJEUyzZ6TzlsX2s5B4Of7lN7EQtAxvtradMMglCQDyaTFgse6CmtmdJ15ElnVRlCg1vpNyVtbem0PWzlNieZsA==", "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", @@ -9075,11 +9367,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-from-markdown/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/mdast-util-mdx-expression": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.0.tgz", @@ -9097,14 +9384,6 @@ "url": "https://opencollective.com/unified" } }, - 
"node_modules/mdast-util-mdx-expression/node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, "node_modules/mdast-util-mdx-jsx": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.1.2.tgz", @@ -9129,110 +9408,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-mdx-jsx/node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, - "node_modules/mdast-util-mdx-jsx/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, - "node_modules/mdast-util-mdx-jsx/node_modules/character-entities": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", - "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-util-mdx-jsx/node_modules/character-entities-legacy": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", - "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - 
"node_modules/mdast-util-mdx-jsx/node_modules/character-reference-invalid": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", - "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-util-mdx-jsx/node_modules/is-alphabetical": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", - "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-util-mdx-jsx/node_modules/is-alphanumerical": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", - "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", - "dependencies": { - "is-alphabetical": "^2.0.0", - "is-decimal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-util-mdx-jsx/node_modules/is-decimal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", - "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-util-mdx-jsx/node_modules/is-hexadecimal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", - "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", - "funding": { - "type": "github", - "url": 
"https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-util-mdx-jsx/node_modules/parse-entities": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", - "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", - "dependencies": { - "@types/unist": "^2.0.0", - "character-entities": "^2.0.0", - "character-entities-legacy": "^3.0.0", - "character-reference-invalid": "^2.0.0", - "decode-named-character-reference": "^1.0.0", - "is-alphanumerical": "^2.0.0", - "is-decimal": "^2.0.0", - "is-hexadecimal": "^2.0.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/wooorm" - } - }, - "node_modules/mdast-util-mdx-jsx/node_modules/parse-entities/node_modules/@types/unist": { - "version": "2.0.10", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz", - "integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==" - }, "node_modules/mdast-util-mdxjs-esm": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", @@ -9250,14 +9425,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-mdxjs-esm/node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, "node_modules/mdast-util-phrasing": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", @@ -9272,9 +9439,9 @@ } }, "node_modules/mdast-util-to-hast": { - "version": "13.1.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.1.0.tgz", - "integrity": 
"sha512-/e2l/6+OdGp/FB+ctrJ9Avz71AN/GRH3oi/3KAx/kMnoUsD6q0woXlDT8lLEeViVKE7oZxE7RXzvO3T8kF2/sA==", + "version": "13.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", + "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", @@ -9291,14 +9458,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-to-hast/node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, "node_modules/mdast-util-to-markdown": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz", @@ -9318,11 +9477,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/mdast-util-to-markdown/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/mdast-util-to-string": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", @@ -9803,13 +9957,28 @@ "node": ">= 0.6" } }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "node_modules/mimic-fn": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", + "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", + 
"dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mimic-function": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz", + "integrity": "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==", "dev": true, "engines": { - "node": ">=6" + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/min-indent": { @@ -9822,9 +9991,9 @@ } }, "node_modules/minimatch": { - "version": "9.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.4.tgz", - "integrity": "sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==", + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", "dependencies": { "brace-expansion": "^2.0.1" }, @@ -9845,23 +10014,23 @@ } }, "node_modules/minipass": { - "version": "7.0.4", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.0.4.tgz", - "integrity": "sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==", + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", "engines": { "node": ">=16 || 14 >=14.17" } }, "node_modules/mlly": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.6.1.tgz", - "integrity": "sha512-vLgaHvaeunuOXHSmEbZ9izxPx3USsk8KCQ8iC+aTlp5sKRSoZvwhHh5L9VbKSaVC6sJDqbyohIS76E2VmHIPAA==", + "version": "1.7.1", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.7.1.tgz", + "integrity": 
"sha512-rrVRZRELyQzrIUAVMHxP97kv+G786pHmOKzuFII8zDYahFBS7qnHh2AlYSl1GAHhaMPCz6/oHjVMcfFYgFYHgA==", "dev": true, "dependencies": { "acorn": "^8.11.3", "pathe": "^1.1.2", - "pkg-types": "^1.0.3", - "ufo": "^1.3.2" + "pkg-types": "^1.1.1", + "ufo": "^1.5.3" } }, "node_modules/monaco-editor": { @@ -9946,9 +10115,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.14", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz", - "integrity": "sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw==" + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz", + "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==" }, "node_modules/normalize-path": { "version": "3.0.0", @@ -9967,6 +10136,33 @@ "node": ">=0.10.0" } }, + "node_modules/npm-run-path": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", + "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", + "dev": true, + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/nwsapi": { "version": "2.2.12", "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.12.tgz", @@ -9990,10 +10186,13 @@ } }, "node_modules/object-inspect": { - "version": "1.13.1", - "resolved": 
"https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", - "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz", + "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==", "dev": true, + "engines": { + "node": ">= 0.4" + }, "funding": { "url": "https://github.com/sponsors/ljharb" } @@ -10114,32 +10313,32 @@ } }, "node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", + "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", "dev": true, "dependencies": { - "mimic-fn": "^2.1.0" + "mimic-fn": "^4.0.0" }, "engines": { - "node": ">=6" + "node": ">=12" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/optionator": { - "version": "0.9.3", - "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", - "integrity": "sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==", + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", "dev": true, "dependencies": { - "@aashutoshrathi/word-wrap": "^1.2.3", "deep-is": "^0.1.3", "fast-levenshtein": "^2.0.6", "levn": "^0.4.1", "prelude-ls": "^1.2.1", - "type-check": "^0.4.0" + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" }, "engines": { "node": ">= 0.8.0" @@ -10175,6 +10374,11 @@ "url": "https://github.com/sponsors/sindresorhus" 
} }, + "node_modules/package-json-from-dist": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz", + "integrity": "sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==" + }, "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -10188,22 +10392,29 @@ } }, "node_modules/parse-entities": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", - "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.1.tgz", + "integrity": "sha512-SWzvYcSJh4d/SGLIOQfZ/CoNv6BTlI6YEQ7Nj82oDVnRpwe/Z/F1EMx42x3JAOwGBlCjeCH0BRJQbQ/opHL17w==", "dependencies": { - "character-entities": "^1.0.0", - "character-entities-legacy": "^1.0.0", - "character-reference-invalid": "^1.0.0", - "is-alphanumerical": "^1.0.0", - "is-decimal": "^1.0.0", - "is-hexadecimal": "^1.0.0" + "@types/unist": "^2.0.0", + "character-entities": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" }, "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.10", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz", + "integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==" + }, "node_modules/parse5": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz", @@ -10248,27 +10459,24 @@ "integrity": 
"sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" }, "node_modules/path-scurry": { - "version": "1.10.2", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.10.2.tgz", - "integrity": "sha512-7xTavNy5RQXnsjANvVvMkEjvloOinkAjv/Z6Ildz9v2RinZ4SBKTWFOVRbaF8p0vpHnyjV/UwNDdKuUv6M5qcA==", + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", "dependencies": { "lru-cache": "^10.2.0", "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" }, "engines": { - "node": ">=16 || 14 >=14.17" + "node": ">=16 || 14 >=14.18" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/path-scurry/node_modules/lru-cache": { - "version": "10.2.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.2.0.tgz", - "integrity": "sha512-2bIM8x+VAf6JT4bKAljS1qUWgMsqZRPGJS6FSahIMPVvctcNhyVp7AJu7quxOW9jwkryBReKZY5tY5JYv2n/7Q==", - "engines": { - "node": "14 || >=16.14" - } + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==" }, "node_modules/path-type": { "version": "4.0.0", @@ -10339,14 +10547,14 @@ } }, "node_modules/pkg-types": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.0.3.tgz", - "integrity": "sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-1.1.3.tgz", + "integrity": "sha512-+JrgthZG6m3ckicaOB74TwQ+tBWsFl3qVQg7mN8ulwSOElJ7gBhKzj2VkCPnZ4NlF6kEquYU+RIYNVAvzd54UA==", "dev": true, "dependencies": { - "jsonc-parser": "^3.2.0", - "mlly": "^1.2.0", - "pathe": "^1.1.0" + "confbox": "^0.1.7", + "mlly": "^1.7.1", + 
"pathe": "^1.1.2" } }, "node_modules/possible-typed-array-names": { @@ -10359,9 +10567,9 @@ } }, "node_modules/postcss": { - "version": "8.4.40", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.40.tgz", - "integrity": "sha512-YF2kKIUzAofPMpfH6hOi2cGnv/HrUlfucspc7pDyvv7kGdqXrfj8SCl/t8owkEgKEuu8ZcRjSOxFxVLqwChZ2Q==", + "version": "8.4.41", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.41.tgz", + "integrity": "sha512-TesUflQ0WKZqAvg52PWL6kHgLKP6xB6heTOdoYM0Wt2UHyxNa4K25EZZMgKns3BH1RLVbZCREPpLY0rhnNoHVQ==", "funding": [ { "type": "opencollective", @@ -10454,27 +10662,46 @@ } }, "node_modules/postcss-nested": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.0.1.tgz", - "integrity": "sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==", + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], "dependencies": { - "postcss-selector-parser": "^6.0.11" + "postcss-selector-parser": "^6.1.1" }, "engines": { "node": ">=12.0" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, "peerDependencies": { "postcss": "^8.2.14" } }, + "node_modules/postcss-nested/node_modules/postcss-selector-parser": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.1.tgz", + "integrity": "sha512-b4dlw/9V8A71rLIDsSwVmak9z2DuBUB7CA1/wSdelNEzqsjoSPeADTWNO09lpH49Diy3/JIZ2bSPB1dI3LJCHg==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, 
"node_modules/postcss-selector-parser": { - "version": "6.0.16", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.16.tgz", - "integrity": "sha512-A0RVJrX+IUkVZbW3ClroRWurercFhieevHB38sr2+l9eUClMqome3LmEmnhlNy+5Mr2EYN6B2Kaw9wYdd+VHiw==", + "version": "6.0.10", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz", + "integrity": "sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w==", + "dev": true, "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" @@ -10578,12 +10805,9 @@ "dev": true }, "node_modules/property-information": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", - "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", - "dependencies": { - "xtend": "^4.0.0" - }, + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-6.5.0.tgz", + "integrity": "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -10676,9 +10900,9 @@ } }, "node_modules/react-i18next": { - "version": "15.0.0", - "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-15.0.0.tgz", - "integrity": "sha512-2O3IgF4zivg57Q6p6i+ChDgJ371IDcEWbuWC6gvoh5NbkDMs0Q+O7RPr4v61+Se32E0V+LmtwePAeqWZW0bi6g==", + "version": "15.0.1", + "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-15.0.1.tgz", + "integrity": "sha512-NwxLqNM6CLbeGA9xPsjits0EnXdKgCRSS6cgkgOdNcPXqL+1fYNl8fBg1wmnnHvFy812Bt4IWTPE9zjoPmFj3w==", "dependencies": { "@babel/runtime": "^7.24.8", "html-parse-stringify": "^3.0.1" @@ -10736,14 +10960,6 @@ "react": ">=18" } }, - "node_modules/react-markdown/node_modules/@types/hast": { - "version": "3.0.4", - "resolved": 
"https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, "node_modules/react-redux": { "version": "9.1.2", "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-9.1.2.tgz", @@ -10952,6 +11168,90 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/refractor/node_modules/character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + 
"node_modules/refractor/node_modules/is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dependencies": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dependencies": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/refractor/node_modules/prismjs": { "version": "1.27.0", "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz", @@ -11014,14 +11314,6 @@ "url": "https://opencollective.com/unified" } 
}, - "node_modules/remark-rehype/node_modules/@types/hast": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", - "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", - "dependencies": { - "@types/unist": "*" - } - }, "node_modules/requires-port": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", @@ -11029,9 +11321,9 @@ "dev": true }, "node_modules/reselect": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.0.tgz", - "integrity": "sha512-aw7jcGLDpSgNDyWBQLv2cedml85qd95/iszJjN988zX1t7AVRJi19d9kto5+W7oCfQ94gyo40dVbT6g2k4/kXg==" + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz", + "integrity": "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==" }, "node_modules/resolve": { "version": "1.22.8", @@ -11059,16 +11351,31 @@ } }, "node_modules/restore-cursor": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-4.0.0.tgz", - "integrity": "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==", + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", + "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", "dev": true, "dependencies": { - "onetime": "^5.1.0", - "signal-exit": "^3.0.2" + "onetime": "^7.0.0", + "signal-exit": "^4.1.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/restore-cursor/node_modules/onetime": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz", + "integrity": 
"sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + "dev": true, + "dependencies": { + "mimic-function": "^5.0.0" + }, + "engines": { + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -11084,15 +11391,16 @@ } }, "node_modules/rfdc": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.3.1.tgz", - "integrity": "sha512-r5a3l5HzYlIC68TpmYKlxWjmOP6wiPJ1vWv2HeLhNsRZMrCkxeqxiHlQ21oXmQ4F3SiryXBHhAD7JZqvOJjFmg==", + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.4.1.tgz", + "integrity": "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==", "dev": true }, "node_modules/rimraf": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", "dev": true, "dependencies": { "glob": "^7.1.3" @@ -11105,9 +11413,9 @@ } }, "node_modules/rollup": { - "version": "4.14.1", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.14.1.tgz", - "integrity": "sha512-4LnHSdd3QK2pa1J6dFbfm1HN0D7vSK/ZuZTsdyUAlA6Rr1yTouUTL13HaDOGJVgby461AhrNGBS7sCGXXtT+SA==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.19.2.tgz", + "integrity": "sha512-6/jgnN1svF9PjNYJ4ya3l+cqutg49vOZ4rVgsDKxdl+5gpGPnByFXWGyfH9YGx9i3nfBwSu1Iyu6vGwFFA0BdQ==", "dependencies": { "@types/estree": "1.0.5" }, @@ -11119,28 +11427,29 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.14.1", - "@rollup/rollup-android-arm64": "4.14.1", - "@rollup/rollup-darwin-arm64": "4.14.1", - "@rollup/rollup-darwin-x64": "4.14.1", - "@rollup/rollup-linux-arm-gnueabihf": "4.14.1", - "@rollup/rollup-linux-arm64-gnu": "4.14.1", - "@rollup/rollup-linux-arm64-musl": "4.14.1", - 
"@rollup/rollup-linux-powerpc64le-gnu": "4.14.1", - "@rollup/rollup-linux-riscv64-gnu": "4.14.1", - "@rollup/rollup-linux-s390x-gnu": "4.14.1", - "@rollup/rollup-linux-x64-gnu": "4.14.1", - "@rollup/rollup-linux-x64-musl": "4.14.1", - "@rollup/rollup-win32-arm64-msvc": "4.14.1", - "@rollup/rollup-win32-ia32-msvc": "4.14.1", - "@rollup/rollup-win32-x64-msvc": "4.14.1", + "@rollup/rollup-android-arm-eabi": "4.19.2", + "@rollup/rollup-android-arm64": "4.19.2", + "@rollup/rollup-darwin-arm64": "4.19.2", + "@rollup/rollup-darwin-x64": "4.19.2", + "@rollup/rollup-linux-arm-gnueabihf": "4.19.2", + "@rollup/rollup-linux-arm-musleabihf": "4.19.2", + "@rollup/rollup-linux-arm64-gnu": "4.19.2", + "@rollup/rollup-linux-arm64-musl": "4.19.2", + "@rollup/rollup-linux-powerpc64le-gnu": "4.19.2", + "@rollup/rollup-linux-riscv64-gnu": "4.19.2", + "@rollup/rollup-linux-s390x-gnu": "4.19.2", + "@rollup/rollup-linux-x64-gnu": "4.19.2", + "@rollup/rollup-linux-x64-musl": "4.19.2", + "@rollup/rollup-win32-arm64-msvc": "4.19.2", + "@rollup/rollup-win32-ia32-msvc": "4.19.2", + "@rollup/rollup-win32-x64-msvc": "4.19.2", "fsevents": "~2.3.2" } }, "node_modules/rrweb-cssom": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.6.0.tgz", - "integrity": "sha512-APM0Gt1KoXBz0iIkkdB/kfvGOwC4UuJFeG/c+yV7wSc7q96cG/kJ0HiYCnzivD9SB53cLV1MlHFNfOuPaadYSw==", + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.7.1.tgz", + "integrity": "sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==", "dev": true }, "node_modules/run-parallel": { @@ -11235,13 +11544,10 @@ } }, "node_modules/semver": { - "version": "7.6.0", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz", - "integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==", + "version": "7.6.3", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", "dev": true, - "dependencies": { - "lru-cache": "^6.0.0" - }, "bin": { "semver": "bin/semver.js" }, @@ -11249,24 +11555,6 @@ "node": ">=10" } }, - "node_modules/semver/node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/semver/node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true - }, "node_modules/set-function-length": { "version": "1.2.2", "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", @@ -11343,23 +11631,23 @@ "dev": true }, "node_modules/signal-exit": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", - "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", - "dev": true + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } }, "node_modules/simple-swizzle": { "version": "0.2.2", "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz", "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==", "dependencies": { - 
"is-arrayish": "^0.3.1" - } - }, - "node_modules/simple-swizzle/node_modules/is-arrayish": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz", - "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==" + "is-arrayish": "^0.3.1" + } }, "node_modules/slash": { "version": "3.0.0", @@ -11407,9 +11695,9 @@ } }, "node_modules/space-separated-tokens": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", - "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", "funding": { "type": "github", "url": "https://github.com/sponsors/wooorm" @@ -11454,9 +11742,9 @@ } }, "node_modules/string-width": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.1.0.tgz", - "integrity": "sha512-SEIJCWiX7Kg4c129n48aDRwLbFb2LJmXXFrWBG4NGaRtMQ3myKPKbwrD1BKqQn74oCoNMBVrfDEr5M9YxCsrkw==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", "dev": true, "dependencies": { "emoji-regex": "^10.3.0", @@ -11638,15 +11926,6 @@ "url": "https://github.com/sponsors/wooorm" } }, - "node_modules/stringify-entities/node_modules/character-entities-legacy": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", - "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", - "funding": { - "type": "github", - "url": 
"https://github.com/sponsors/wooorm" - } - }, "node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -11670,6 +11949,27 @@ "node": ">=8" } }, + "node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-final-newline": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", + "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", + "dev": true, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/strip-indent": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/strip-indent/-/strip-indent-3.0.0.tgz", @@ -11750,22 +12050,20 @@ } }, "node_modules/sucrase/node_modules/glob": { - "version": "10.3.12", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.3.12.tgz", - "integrity": "sha512-TCNv8vJ+xz4QiqTpfOJA7HvYv+tNIRHKfUWw/q+v2jdgN4ebz+KY9tGx5J4rHP0o84mNP+ApH66HRX8us3Khqg==", + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", "dependencies": { "foreground-child": "^3.1.0", - "jackspeak": "^2.3.6", - "minimatch": "^9.0.1", - "minipass": "^7.0.4", - "path-scurry": "^1.10.2" + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" }, "bin": { "glob": "dist/esm/bin.mjs" }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, "funding": { "url": "https://github.com/sponsors/isaacs" } @@ -11849,9 +12147,9 @@ } }, 
"node_modules/tailwindcss": { - "version": "3.4.7", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.7.tgz", - "integrity": "sha512-rxWZbe87YJb4OcSopb7up2Ba4U82BoiSGUdoDr3Ydrg9ckxFS/YWsvhN323GMcddgU65QRy7JndC7ahhInhvlQ==", + "version": "3.4.9", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.9.tgz", + "integrity": "sha512-1SEOvRr6sSdV5IDf9iC+NU4dhwdqzF4zKKq3sAbasUWHEM6lsMhX+eNN5gkPx1BvLFEnZQEUFbXnGj8Qlp83Pg==", "dependencies": { "@alloc/quick-lru": "^5.2.0", "arg": "^5.0.2", @@ -11892,6 +12190,18 @@ "node": ">=10" } }, + "node_modules/tailwindcss/node_modules/postcss-selector-parser": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.1.tgz", + "integrity": "sha512-b4dlw/9V8A71rLIDsSwVmak9z2DuBUB7CA1/wSdelNEzqsjoSPeADTWNO09lpH49Diy3/JIZ2bSPB1dI3LJCHg==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/test-exclude": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz", @@ -11954,9 +12264,9 @@ } }, "node_modules/tinybench": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.7.0.tgz", - "integrity": "sha512-Qgayeb106x2o4hNzNjsZEfFziw8IbKqtbXBjVh7VIZfBxfD5M4gWtpyx5+YTae2gJ6Y6Dz/KLepiv16RFeQWNA==", + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.8.0.tgz", + "integrity": "sha512-1/eK7zUnIklz4JUUlL+658n58XO2hHLQfSk1Zf2LKieUjxidN16eKFEoDEfjHc3ohofSSqK3X5yO6VGb6iW8Lw==", "dev": true }, "node_modules/tinypool": { @@ -12102,19 +12412,10 @@ "json5": "lib/cli.js" } }, - "node_modules/tsconfig-paths/node_modules/strip-bom": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", - "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", - "dev": true, - 
"engines": { - "node": ">=4" - } - }, "node_modules/tslib": { - "version": "2.6.2", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", - "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==" }, "node_modules/type-check": { "version": "0.4.0", @@ -12129,14 +12430,26 @@ } }, "node_modules/type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.1.0.tgz", + "integrity": "sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw==", "dev": true, "engines": { "node": ">=4" } }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/typed-array-buffer": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz", @@ -12224,9 +12537,9 @@ } }, "node_modules/ufo": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.3.tgz", - "integrity": "sha512-Y7HYmWaFwPUmkoQCUIAYpKqkOf+SbVj/2fJJZ4RJMCfZp0rTGwRbzQD+HghfnhKOjL9E01okqz+ncJskGYfBNw==", + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.5.4.tgz", + "integrity": 
"sha512-UsUk3byDzKd04EyoZ7U4DOlxQaD14JUKQl6/P7wiX4FNvUfm3XL246n9W5AmqwW5RSFJ27NAuM0iLscAOYUiGQ==", "dev": true }, "node_modules/unbox-primitive": { @@ -12245,15 +12558,15 @@ } }, "node_modules/undici-types": { - "version": "5.26.5", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", - "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "version": "6.13.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.13.0.tgz", + "integrity": "sha512-xtFJHudx8S2DSoujjMd1WeWvn7KKWFRESZTMeL1RptAYERu29D6jphMjjY+vn96jvN3kVPDNxU/E13VTaXj6jg==", "devOptional": true }, "node_modules/unified": { - "version": "11.0.4", - "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.4.tgz", - "integrity": "sha512-apMPnyLjAX+ty4OrNap7yumyVAMlKx5IWU2wlzzUdYJO9A8f1p9m/gywF/GM2ZDFcjQPrx59Mc90KwmxsoklxQ==", + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", "dependencies": { "@types/unist": "^3.0.0", "bail": "^2.0.0", @@ -12268,11 +12581,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/unified/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/unist-util-is": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz", @@ -12285,11 +12593,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/unist-util-is/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": 
"sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/unist-util-position": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", @@ -12302,11 +12605,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/unist-util-position/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/unist-util-remove-position": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz", @@ -12320,11 +12618,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/unist-util-remove-position/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/unist-util-stringify-position": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", @@ -12337,11 +12630,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/unist-util-stringify-position/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/unist-util-visit": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.0.0.tgz", @@ -12369,16 +12657,6 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/unist-util-visit-parents/node_modules/@types/unist": { - "version": 
"3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, - "node_modules/unist-util-visit/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/universalify": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", @@ -12389,9 +12667,9 @@ } }, "node_modules/update-browserslist-db": { - "version": "1.0.13", - "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz", - "integrity": "sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz", + "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==", "funding": [ { "type": "opencollective", @@ -12407,8 +12685,8 @@ } ], "dependencies": { - "escalade": "^3.1.1", - "picocolors": "^1.0.0" + "escalade": "^3.1.2", + "picocolors": "^1.0.1" }, "bin": { "update-browserslist-db": "cli.js" @@ -12515,9 +12793,9 @@ } }, "node_modules/use-sync-external-store": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", - "integrity": "sha512-eEgnFxGQ1Ife9bzYs6VLi8/4X6CObHMw9Qr9tPY43iKwsPw8xE8+EFsf/2cFZ5S3esXgpWgtSCtLNS41F+sKPA==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.2.tgz", + "integrity": "sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==", "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || 
^18.0.0" } @@ -12528,9 +12806,9 @@ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, "node_modules/vfile": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.1.tgz", - "integrity": "sha512-1bYqc7pt6NIADBJ98UiG0Bn/CHIVOoZ/IyEkqIruLg0mE1BKzkOXY2D6CSqQIcKqgadppE5lrxgWXJmXd7zZJw==", + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.2.tgz", + "integrity": "sha512-zND7NlS8rJYb/sPqkb13ZvbbUoExdbi4w3SfRrMq6R3FvnLQmmfpajJNITuuYm6AZ5uao9vy4BAos3EXBPf2rg==", "dependencies": { "@types/unist": "^3.0.0", "unist-util-stringify-position": "^4.0.0", @@ -12554,23 +12832,13 @@ "url": "https://opencollective.com/unified" } }, - "node_modules/vfile-message/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, - "node_modules/vfile/node_modules/@types/unist": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.2.tgz", - "integrity": "sha512-dqId9J8K/vGi5Zr7oo212BGii5m3q5Hxlkwy3WpYuKPklmBEvsbMYYyLxAQpSffdLl/gdW0XUpKWFvYmyoWCoQ==" - }, "node_modules/vite": { - "version": "5.3.5", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.3.5.tgz", - "integrity": "sha512-MdjglKR6AQXQb9JGiS7Rc2wC6uMjcm7Go/NHNO63EwiJXfuk9PgqiP/n5IDJCziMkfw9n4Ubp7lttNwz+8ZVKA==", + "version": "5.4.0", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.0.tgz", + "integrity": "sha512-5xokfMX0PIiwCMCMb9ZJcMyh5wbBun0zUzKib+L65vAZ8GY9ePZMXxFrHbr/Kyll2+LSCY7xtERPpxkBDKngwg==", "dependencies": { "esbuild": "^0.21.3", - "postcss": "^8.4.39", + "postcss": "^8.4.40", "rollup": "^4.13.0" }, "bin": { @@ -12590,6 +12858,7 @@ "less": "*", "lightningcss": "^1.21.0", "sass": "*", + "sass-embedded": "*", "stylus": "*", "sugarss": "*", "terser": "^5.4.0" @@ -12607,6 
+12876,9 @@ "sass": { "optional": true }, + "sass-embedded": { + "optional": true + }, "stylus": { "optional": true }, @@ -12641,9 +12913,9 @@ } }, "node_modules/vite-tsconfig-paths": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/vite-tsconfig-paths/-/vite-tsconfig-paths-4.3.2.tgz", - "integrity": "sha512-0Vd/a6po6Q+86rPlntHye7F31zA2URZMbH8M3saAZ/xR9QoGN/L21bxEGfXdWmFdNkqPpRdxFT7nmNe12e9/uA==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/vite-tsconfig-paths/-/vite-tsconfig-paths-5.0.1.tgz", + "integrity": "sha512-yqwv+LstU7NwPeNqajZzLEBVpUFU6Dugtb2P84FXuvaoYA+/70l9MHE+GYfYAycVyPSDYZ7mjOFuYBRqlEpTig==", "dev": true, "dependencies": { "debug": "^4.1.1", @@ -12724,140 +12996,6 @@ } } }, - "node_modules/vitest/node_modules/execa": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", - "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", - "dev": true, - "dependencies": { - "cross-spawn": "^7.0.3", - "get-stream": "^8.0.1", - "human-signals": "^5.0.0", - "is-stream": "^3.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^5.1.0", - "onetime": "^6.0.0", - "signal-exit": "^4.1.0", - "strip-final-newline": "^3.0.0" - }, - "engines": { - "node": ">=16.17" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/vitest/node_modules/get-stream": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", - "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", - "dev": true, - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/human-signals": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", - "integrity": 
"sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", - "dev": true, - "engines": { - "node": ">=16.17.0" - } - }, - "node_modules/vitest/node_modules/is-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", - "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", - "dev": true, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/mimic-fn": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", - "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/npm-run-path": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", - "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", - "dev": true, - "dependencies": { - "path-key": "^4.0.0" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/onetime": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", - "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", - "dev": true, - "dependencies": { - "mimic-fn": "^4.0.0" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/path-key": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", - "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/vitest/node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "dev": true, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/vitest/node_modules/strip-final-newline": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", - "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", - "dev": true, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/void-elements": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-3.1.0.tgz", @@ -12957,13 +13095,13 @@ } }, "node_modules/which-builtin-type": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.3.tgz", - "integrity": "sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw==", + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/which-builtin-type/-/which-builtin-type-1.1.4.tgz", + "integrity": "sha512-bppkmBSsHFmIMSl8BO9TbsyzsvGjVoppt8xUiGzwiu/bhDCGxnpOKCxgqj6GuyHE0mINMDecBFPlOm2hzY084w==", "dev": true, "dependencies": { - "function.prototype.name": "^1.1.5", - "has-tostringtag": "^1.0.0", + "function.prototype.name": "^1.1.6", + "has-tostringtag": "^1.0.2", "is-async-function": "^2.0.0", "is-date-object": 
"^1.0.5", "is-finalizationregistry": "^1.0.2", @@ -12972,8 +13110,8 @@ "is-weakref": "^1.0.2", "isarray": "^2.0.5", "which-boxed-primitive": "^1.0.2", - "which-collection": "^1.0.1", - "which-typed-array": "^1.1.9" + "which-collection": "^1.0.2", + "which-typed-array": "^1.1.15" }, "engines": { "node": ">= 0.4" @@ -13020,9 +13158,9 @@ } }, "node_modules/why-is-node-running": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.2.2.tgz", - "integrity": "sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", "dev": true, "dependencies": { "siginfo": "^2.0.0", @@ -13035,6 +13173,15 @@ "node": ">=8" } }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/wrap-ansi": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.0.tgz", @@ -13190,9 +13337,9 @@ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" }, "node_modules/yaml": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.4.2.tgz", - "integrity": "sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.5.0.tgz", + "integrity": "sha512-2wWLbGbYDiSqqIKoPjar3MPgB94ErzCtrNE1FdqGuaO0pi2JGjmE8aW8TDZwzU7vuxcGRdL/4gPQwQ7hD5AMSw==", "bin": { "yaml": "bin.mjs" }, diff --git a/frontend/package.json 
b/frontend/package.json index e11090fae513..f5134a233119 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -10,7 +10,7 @@ "@monaco-editor/react": "^4.6.0", "@nextui-org/react": "^2.4.6", "@react-types/shared": "^3.24.1", - "@reduxjs/toolkit": "^2.2.6", + "@reduxjs/toolkit": "^2.2.7", "@vitejs/plugin-react": "^4.3.1", "@xterm/addon-fit": "^0.10.0", "@xterm/xterm": "^5.4.0", @@ -25,13 +25,13 @@ "react-dom": "^18.3.1", "react-highlight": "^0.15.0", "react-hot-toast": "^2.4.1", - "react-i18next": "^15.0.0", + "react-i18next": "^15.0.1", "react-icons": "^5.2.1", "react-markdown": "^9.0.1", "react-redux": "^9.1.2", "react-syntax-highlighter": "^15.5.0", "tailwind-merge": "^2.4.0", - "vite": "^5.3.5", + "vite": "^5.4.0", "web-vitals": "^3.5.2" }, "scripts": { @@ -59,19 +59,19 @@ ] }, "devDependencies": { - "@tailwindcss/typography": "^0.5.13", + "@tailwindcss/typography": "^0.5.14", "@testing-library/jest-dom": "^6.4.8", "@testing-library/react": "^16.0.0", "@testing-library/user-event": "^14.5.2", - "@types/node": "^20.14.12", + "@types/node": "^22.1.0", "@types/react": "^18.3.3", "@types/react-dom": "^18.3.0", "@types/react-highlight": "^0.12.8", "@types/react-syntax-highlighter": "^15.5.13", - "@typescript-eslint/eslint-plugin": "^7.17.0", - "@typescript-eslint/parser": "^7.17.0", + "@typescript-eslint/eslint-plugin": "^7.18.0", + "@typescript-eslint/parser": "^7.18.0", "@vitest/coverage-v8": "^1.6.0", - "autoprefixer": "^10.4.19", + "autoprefixer": "^10.4.20", "eslint": "^8.57.0", "eslint-config-airbnb": "^19.0.4", "eslint-config-airbnb-typescript": "^18.0.0", @@ -81,14 +81,14 @@ "eslint-plugin-prettier": "^5.2.1", "eslint-plugin-react": "^7.35.0", "eslint-plugin-react-hooks": "^4.6.2", - "husky": "^9.1.2", + "husky": "^9.1.4", "jsdom": "^24.1.1", - "lint-staged": "^15.2.7", - "postcss": "^8.4.40", + "lint-staged": "^15.2.8", + "postcss": "^8.4.41", "prettier": "^3.3.3", - "tailwindcss": "^3.4.7", + "tailwindcss": "^3.4.9", "typescript": "^5.5.4", - 
"vite-tsconfig-paths": "^4.3.2", + "vite-tsconfig-paths": "^5.0.1", "vitest": "^1.6.0" }, "packageManager": "npm@10.5.0", diff --git a/frontend/src/components/AgentControlBar.tsx b/frontend/src/components/AgentControlBar.tsx index 04f1775b2323..7c4437086d0a 100644 --- a/frontend/src/components/AgentControlBar.tsx +++ b/frontend/src/components/AgentControlBar.tsx @@ -8,6 +8,7 @@ import { changeAgentState } from "#/services/agentStateService"; import store, { RootState } from "#/store"; import AgentState from "#/types/AgentState"; import { clearMessages } from "#/state/chatSlice"; +import Session from "#/services/session"; const IgnoreTaskStateMap: { [k: string]: AgentState[] } = { [AgentState.PAUSED]: [ @@ -83,6 +84,7 @@ function AgentControlBar() { } if (action === AgentState.STOPPED) { + Session._history = []; store.dispatch(clearMessages()); } else { setIsLoading(true); diff --git a/frontend/src/components/AgentStatusBar.tsx b/frontend/src/components/AgentStatusBar.tsx index 9c38e3e20bf6..890fec87c95b 100644 --- a/frontend/src/components/AgentStatusBar.tsx +++ b/frontend/src/components/AgentStatusBar.tsx @@ -1,10 +1,9 @@ -import React, { useEffect } from "react"; +import React from "react"; import { useTranslation } from "react-i18next"; import { useSelector } from "react-redux"; import { I18nKey } from "#/i18n/declaration"; import { RootState } from "#/store"; import AgentState from "#/types/AgentState"; -import beep from "#/utils/beep"; enum IndicatorColor { BLUE = "bg-blue-500", @@ -80,16 +79,6 @@ function AgentStatusBar() { // - Agent is thinking // - Agent is ready // - Agent is not available - useEffect(() => { - if ( - curAgentState === AgentState.AWAITING_USER_INPUT || - curAgentState === AgentState.ERROR || - curAgentState === AgentState.INIT - ) { - if (document.cookie.indexOf("audio") !== -1) beep(); - } - }, [curAgentState]); - return (
{ }, }); - expect(screen.getByText("https://example.com")).toBeInTheDocument(); + expect(screen.getByRole("textbox")).toHaveValue("https://example.com"); expect(screen.getByAltText(/browser screenshot/i)).toBeInTheDocument(); }); }); diff --git a/frontend/src/components/Browser.tsx b/frontend/src/components/Browser.tsx index 891aee45ec6b..89cd700ed341 100644 --- a/frontend/src/components/Browser.tsx +++ b/frontend/src/components/Browser.tsx @@ -1,17 +1,29 @@ -import React from "react"; +import React, { useState } from "react"; import { useTranslation } from "react-i18next"; import { IoIosGlobe } from "react-icons/io"; import { useSelector } from "react-redux"; import { I18nKey } from "#/i18n/declaration"; import { RootState } from "#/store"; +import { updateBrowserTabUrl } from "#/services/browseService"; function Browser(): JSX.Element { const { t } = useTranslation(); - const { url, screenshotSrc } = useSelector( (state: RootState) => state.browser, ); + const [editableUrl, setEditableUrl] = useState(url); + + const handleUrlChange = (e: React.ChangeEvent) => { + setEditableUrl(e.target.value); + }; + + const handleURLBar = (e: React.KeyboardEvent) => { + if (e.key === "Enter") { + updateBrowserTabUrl(editableUrl); + } + }; + const imgSrc = screenshotSrc && screenshotSrc.startsWith("data:image/png;base64,") ? screenshotSrc @@ -20,7 +32,13 @@ function Browser(): JSX.Element { return (
- {url} +
{screenshotSrc ? ( diff --git a/frontend/src/components/Resizable.tsx b/frontend/src/components/Resizable.tsx index 4d7663e45963..5bdc98afb2d8 100644 --- a/frontend/src/components/Resizable.tsx +++ b/frontend/src/components/Resizable.tsx @@ -25,49 +25,58 @@ export function Container({ orientation, initialSize, }: ContainerProps): JSX.Element { - const [firstSize, setFirstSize] = useState(initialSize); - const [dividerPosition, setDividerPosition] = useState( - undefined, - ); + const [firstSize, setFirstSize] = useState(initialSize); + const [dividerPosition, setDividerPosition] = useState(null); const firstRef = useRef(null); useEffect(() => { - if (firstRef.current !== null) { - if (orientation === Orientation.HORIZONTAL) { - firstRef.current.style.width = `${firstSize}px`; - } else { - firstRef.current.style.height = `${firstSize}px`; - } + if (dividerPosition == null || !firstRef.current) { + return undefined; } - }, [firstSize, orientation]); - - const onMouseMove = (e: MouseEvent) => { - e.preventDefault(); - if (firstSize && dividerPosition) { - if (orientation === Orientation.HORIZONTAL) { - const newLeftWidth = firstSize + e.clientX - dividerPosition; - setDividerPosition(e.clientX); - setFirstSize(newLeftWidth); - } else { - const newTopHeight = firstSize + e.clientY - dividerPosition; - setDividerPosition(e.clientY); - setFirstSize(newTopHeight); + const getFirstSizeFromEvent = (e: MouseEvent) => { + const position = + orientation === Orientation.HORIZONTAL ? 
e.clientX : e.clientY; + return firstSize + position - dividerPosition; + }; + const onMouseMove = (e: MouseEvent) => { + e.preventDefault(); + const newFirstSize = getFirstSizeFromEvent(e); + const { current } = firstRef; + if (current) { + if (orientation === Orientation.HORIZONTAL) { + current.style.width = `${newFirstSize}px`; + } else { + current.style.height = `${newFirstSize}px`; + } } - } - }; - - const onMouseUp = () => { - document.removeEventListener("mousemove", onMouseMove); - document.removeEventListener("mouseup", onMouseUp); - }; + }; + const onMouseUp = (e: MouseEvent) => { + e.preventDefault(); + setFirstSize(getFirstSizeFromEvent(e)); + setDividerPosition(null); + document.removeEventListener("mousemove", onMouseMove); + document.removeEventListener("mouseup", onMouseUp); + }; + document.addEventListener("mousemove", onMouseMove); + document.addEventListener("mouseup", onMouseUp); + return () => { + document.removeEventListener("mousemove", onMouseMove); + document.removeEventListener("mouseup", onMouseUp); + }; + }, [dividerPosition, firstSize, orientation]); const onMouseDown = (e: React.MouseEvent) => { e.preventDefault(); - setDividerPosition( - orientation === Orientation.HORIZONTAL ? e.clientX : e.clientY, - ); - document.addEventListener("mousemove", onMouseMove); - document.addEventListener("mouseup", onMouseUp); + const position = + orientation === Orientation.HORIZONTAL ? e.clientX : e.clientY; + setDividerPosition(position); + }; + + const getStyleForFirst = () => { + if (orientation === Orientation.HORIZONTAL) { + return { width: `${firstSize}px` }; + } + return { height: `${firstSize}px` }; }; return ( @@ -77,7 +86,7 @@ export function Container({ className, )} > -
+
{firstChild}
{ diff --git a/frontend/src/components/chat/ChatInput.test.tsx b/frontend/src/components/chat/ChatInput.test.tsx index 02b786308077..6447ff531209 100644 --- a/frontend/src/components/chat/ChatInput.test.tsx +++ b/frontend/src/components/chat/ChatInput.test.tsx @@ -55,7 +55,7 @@ describe("ChatInput", () => { await user.type(textarea, "Hello, world!"); await user.click(button); - expect(onSendMessage).toHaveBeenCalledWith("Hello, world!"); + expect(onSendMessage).toHaveBeenCalledWith("Hello, world!", "", []); // Additionally, check if it was called exactly once expect(onSendMessage).toHaveBeenCalledTimes(1); }); @@ -68,7 +68,7 @@ describe("ChatInput", () => { await user.type(textarea, "Hello, world!"); await user.keyboard("{Enter}"); - expect(onSendMessage).toHaveBeenCalledWith("Hello, world!"); + expect(onSendMessage).toHaveBeenCalledWith("Hello, world!", "", []); }); it("should NOT send a message when shift + enter is pressed", async () => { diff --git a/frontend/src/components/chat/ChatInput.tsx b/frontend/src/components/chat/ChatInput.tsx index c92dddd1c1d5..270da3b94915 100644 --- a/frontend/src/components/chat/ChatInput.tsx +++ b/frontend/src/components/chat/ChatInput.tsx @@ -1,26 +1,48 @@ import { Textarea } from "@nextui-org/react"; import React from "react"; import { useTranslation } from "react-i18next"; -import { VscArrowUp } from "react-icons/vsc"; +import { VscArrowUp, VscFileMedia } from "react-icons/vsc"; import { twMerge } from "tailwind-merge"; import { I18nKey } from "#/i18n/declaration"; interface ChatInputProps { disabled?: boolean; - onSendMessage: (message: string) => void; + onSendMessage: ( + message: string, + dispatchContent?: string, + image_urls?: string[], + ) => void; } function ChatInput({ disabled = false, onSendMessage }: ChatInputProps) { const { t } = useTranslation(); const [message, setMessage] = React.useState(""); + const [files, setFiles] = React.useState([]); // This is true when the user is typing in an IME (e.g., Chinese, 
Japanese) const [isComposing, setIsComposing] = React.useState(false); - const handleSendChatMessage = () => { + const convertImageToBase64 = (file: File): Promise => + new Promise((resolve, reject) => { + const reader = new FileReader(); + reader.onloadend = () => { + resolve(reader.result as string); + }; + reader.onerror = reject; + reader.readAsDataURL(file); + }); + + const handleSendChatMessage = async () => { if (message.trim()) { - onSendMessage(message); + let base64images: string[] = []; + if (files.length > 0) { + base64images = await Promise.all( + files.map((file) => convertImageToBase64(file)), + ); + } + onSendMessage(message, "", base64images); setMessage(""); + setFiles([]); } }; @@ -33,6 +55,33 @@ function ChatInput({ disabled = false, onSendMessage }: ChatInputProps) { } }; + const handleFileChange = (event: React.ChangeEvent) => { + if (event.target.files) { + setFiles((prev) => [...prev, ...Array.from(event.target.files!)]); + } + }; + + const removeFile = (index: number) => { + setFiles((prevFiles) => prevFiles.filter((_, i) => i !== index)); + }; + + const handlePaste = (event: React.ClipboardEvent) => { + const clipboardItems = Array.from(event.clipboardData.items); + const pastedFiles: File[] = []; + clipboardItems.forEach((item) => { + if (item.type.startsWith("image/")) { + const file = item.getAsFile(); + if (file) { + pastedFiles.push(file); + } + } + }); + if (pastedFiles.length > 0) { + setFiles((prevFiles) => [...prevFiles, ...pastedFiles]); + event.preventDefault(); + } + }; + return (