diff --git a/.actrc b/.actrc index bb7ec3af..089eff4b 100644 --- a/.actrc +++ b/.actrc @@ -1,13 +1,11 @@ ---workflows .github/act-serial.yaml ---platform js-20.04=ghcr.io/catthehacker/ubuntu:js-20.04 ---platform ubuntu-20.04=ghcr.io/catthehacker/ubuntu:act-20.04 +--workflows .github/workflows/main.yml +# Use this to run only specific variants of a matrix: +# -j test --matrix python-version:3.12 --matrix python-version:3.13 --platform ubuntu-22.04=ghcr.io/catthehacker/ubuntu:act-22.04 +--platform ubuntu-24.04=ghcr.io/catthehacker/ubuntu:act-24.04 --platform ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-latest # If using podman, use one of these, preferably in your ~/.actrc: -# For act-cli.rpm on Fedora 37: -# --container-daemon-socket /run/user/1000/podman/podman.sock -# For the latest act, use this in your ~/.actrc: # --container-daemon-socket unix:///run/user/1000/podman/podman.sock # More information on setting up act can be found at: # https://github.com/xenserver/python-libs/blob/master/CONTRIBUTING.md#running-github-actions-locally-using-act diff --git a/.coveragerc b/.coveragerc index af77f29b..75e467e6 100644 --- a/.coveragerc +++ b/.coveragerc @@ -28,3 +28,5 @@ exclude_lines = precision = 1 include = xcp/* +omit = + xcp/dmv.py diff --git a/.github/act-serial.yaml b/.github/act-serial.yaml deleted file mode 100644 index e427ef5d..00000000 --- a/.github/act-serial.yaml +++ /dev/null @@ -1,66 +0,0 @@ -# actions can be run locally using act and docker, on Fedora 37 also with podman, using: -# https://github.com/nektos/act -# sudo dnf install -y act-cli podman-docker -# act --bind --container-daemon-socket $XDG_RUNTIME_DIR/podman/podman.sock -W .github/workflows/main.yml - -name: Unit tests - -concurrency: # On new workflow, cancel old workflows from the same PR, branch or tag: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -# Checks can be skipped by adding "skip-checks: true" to a commit 
message, -# or requested by adding "request-checks: true" if disabled by default for pushes: -# https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/about-status-checks#skipping-and-requesting-checks-for-individual-commits -on: [push, pull_request] -env: - PYTHONWARNINGS: "ignore:DEPRECATION" - PIP_ROOT_USER_ACTION: "ignore" # For local testing using act-cli - PIP_NO_WARN_SCRIPT_LOCATION: "0" # For local testing using act-cli - PIP_DISABLE_PIP_VERSION_CHECK: "1" # Reduce noise in logs - -jobs: - test: - strategy: - # See: https://github.com/xenserver/python-libs/pull/26#discussion_r1179482169 - max-parallel: 1 - # Want to get the results of all the tests, don't terminate all on a fail: - fail-fast: true - matrix: - include: - # This tests with Python 2.7 and with Ubuntu-20.04's Python 3.8 for combined py2+3 coverage: - - python-version: '3.6' - os: ubuntu-20.04 - - python-version: '3.10' - os: ubuntu-22.04 - - python-version: '3.11' - os: ubuntu-22.04 - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 # Needed by diff-cover to get the changed lines: origin/master..HEAD - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Install Python 2.7 from Ubuntu 20.04 using apt-get install - if: ${{ matrix.os == 'ubuntu-20.04' }} - run: apt-get update && apt-get install -y python2-dev - - - name: Run of tox on ubuntu-latest - if: ${{ startsWith(matrix.python-version, '3.') && matrix.python-version != 3.6 }} - run: | - pip install 'virtualenv<20.22' 'tox==4.5.1' tox-gh-actions - tox --workdir .github/workflows/.tox --recreate - - # tox >= 4.0.0 is needed for using optional-dependencies from pyproject.toml, which is - # is not available for python <= 3.6, so use the python3.8 of Ubuntu-20.04 to install it: - - name: Tox on ${{ matrix.os }} (Using 3.8 to use extras 
from pyproject.toml) - if: ${{ matrix.python-version == 3.6 }} - run: | - set -xv;curl -sSL https://bootstrap.pypa.io/get-pip.py -o get-pip.py - python3.8 get-pip.py - python3.8 -m pip install 'virtualenv<20.22' 'tox==4.5.1' - tox --workdir .github/workflows/.tox --recreate -e py36-lint-test diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 90403a09..94e716f5 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -20,7 +20,25 @@ env: PIP_DISABLE_PIP_VERSION_CHECK: "1" # Reduce noise in logs jobs: + pre-commit: + env: + SKIP: tox,no-commit-to-branch + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.13 + - uses: actions/setup-node@v4 + with: + node-version: 22 + - uses: pre-commit/action@v3.0.1 + - uses: pre-commit-ci/lite-action@v1.0.3 + if: always() + + test: + needs: pre-commit strategy: # See: https://github.com/xenserver/python-libs/pull/26#discussion_r1179482169 # max-parallel: 1 @@ -37,30 +55,29 @@ jobs: runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 + if: ${{ !(matrix.python-version == '3.11' && github.actor == 'nektos/act') }} with: fetch-depth: 0 # Needed by diff-cover to get the changed lines: origin/master..HEAD + - name: Set up Python ${{ matrix.python-version }} + if: ${{ !(matrix.python-version == '3.11' && github.actor == 'nektos/act') }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - name: Install Python 2.7 from Ubuntu 20.04 using apt-get install - if: ${{ matrix.os == 'ubuntu-20.04' }} - run: sudo apt-get update && sudo apt-get install -y python2-dev - - name: Install missing cpio in containers of nektos/act - if: ${{ github.actor == 'nektos/act'}} + if: ${{ (matrix.python-version != '3.11' && github.actor == 'nektos/act') }} run: apt-get update && apt-get install -y cpio - - name: Run of tox on ubuntu-latest - if: ${{ startsWith(matrix.python-version, '3.') && 
matrix.python-version != 3.6 }} + - name: Run of tox on Ubuntu + if: ${{ !(matrix.python-version == '3.11' && github.actor == 'nektos/act') }} run: | - pip install 'virtualenv<20.22' 'tox==4.5.1' tox-gh-actions + pip install tox tox-gh-actions tox --workdir .github/workflows/.tox --recreate - name: Select the coverage file for upload if: | - ( matrix.python-version == '3.6' || matrix.python-version == '3.11' ) && + matrix.python-version == '3.11' && ( !cancelled() && github.actor != 'nektos/act' ) id: coverage run: mv $( ls -t .github/workflows/.tox/*/log/.coverage | head -1 ) .coverage diff --git a/.github/workflows/reviewdog-review.yml b/.github/workflows/reviewdog-review.yml new file mode 100644 index 00000000..09968449 --- /dev/null +++ b/.github/workflows/reviewdog-review.yml @@ -0,0 +1,55 @@ +name: Reviewdog PR Review comments + +# +# The reviewdog steps use reporter: github-pr-review, which submits the results +# as a review comment on the pull request. It needs a GitHub token with +# public_repo scope to post the comments and can only be used in the context +# of a pull request. 
+# +on: pull_request + +# +# Checks can be skipped by adding "skip-checks: true" to a commit message, +# or requested by adding "request-checks: true" if disabled by default for pushes: +# https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/collaborating-on-repositories-with-code-quality-features/about-status-checks#skipping-and-requesting-checks-for-individual-commits +# + +concurrency: # On new workflow, cancel old workflows from the same PR, branch or tag: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + reviewdog: + runs-on: ubuntu-24.04 + env: + REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.REVIEWDOG_GITHUB_API_TOKEN }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.13 + + - name: Install uv and activate the environment + uses: astral-sh/setup-uv@v6 + with: + activate-environment: true + + - run: uv pip install pylint types-setuptools -r pyproject.toml --extra mypy + + - uses: tsuyoshicho/action-mypy@v4 + name: Run mypy with reviewdog to submit GitHub checks for warnings + if: ${{ github.actor != 'nektos/act' }} + with: + install_types: false + mypy_flags: --exclude python-libs-*/stubs/ + reporter: github-pr-review + level: warning + github_token: ${{ secrets.REVIEWDOG_GITHUB_API_TOKEN }} + + - uses: dciborow/action-pylint@0.1.0 + name: Run pylint with reviewdog to submit GitHub checks for warnings + if: ${{ github.actor != 'nektos/act' }} + with: + reporter: github-pr-review + glob_pattern: "xcp tests" + github_token: ${{ secrets.REVIEWDOG_GITHUB_API_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 917040ce..ac73ec2b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,7 +37,7 @@ # https://github.com/dexpota/cheatsheets/blob/master/pre-commit exclude: "^tests/data" fail_fast: true -default_stages: [commit] +default_stages: [pre-commit] repos: - repo: local hooks: @@ 
-47,7 +47,7 @@ repos: types: [binary] language: fail - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v6.0.0 hooks: - id: no-commit-to-branch args: [--branch, master] @@ -62,11 +62,21 @@ repos: - isort +- repo: https://github.com/pre-commit/pygrep-hooks + rev: v1.10.0 # Use the ref you want to point at + hooks: + # Enforce that `# type: ignore` annotations always occur with specific codes. + # Sample annotation: # type: ignore[attr-defined,name-defined] + - id: python-check-blanket-type-ignore + + - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.0 + rev: v1.17.1 hooks: - id: mypy additional_dependencies: + - pyfakefs + - pytest_httpserver - pytest-subprocess - types-mock - types-six @@ -74,64 +84,33 @@ repos: - repo: https://github.com/rcmdnk/pyproject-pre-commit - rev: v0.1.9 + rev: v0.4.2 hooks: - id: shellcheck - repo: https://github.com/igorshubovych/markdownlint-cli - rev: v0.42.0 + rev: v0.45.0 hooks: - id: markdownlint -- repo: https://github.com/pycqa/pylint - rev: v2.17.4 - hooks: - - id: pylint - args: - [ - -sn, # Don't display the score - --load-plugins=pylint.extensions.eq_without_hash, - --ignore-imports=yes, - "--disable=duplicate-code,line-too-long", - ] - log_file: ".git/pre-commit-pylint.log" - additional_dependencies: - - pyfakefs - - six - - mock - - pandas - - pytest_forked - - toml - repo: local hooks: - - id: pytype - name: pytype (may take up to two minutes) - entry: sh -c "pytype >/dev/tty" - types: [python] - verbose: true - language: python - language_version: python3.8 - require_serial: true - additional_dependencies: [pytype] - - id: pytest - name: Check pytest unit tests pass + - id: tox + name: pytest unit tests and static analysis using tox types: [python] # entry: sh -c "pytest -x -rf --new-first --show-capture=all >/dev/tty" - entry: sh -c "tox -e py38-covcombine >/dev/tty" + entry: sh -c "tox -e py311-cov-check-pytype-pyright-lint-mdreport >/dev/tty" verbose: true language: python 
require_serial: true pass_filenames: false -- repo: https://github.com/pre-commit/pygrep-hooks - rev: v1.10.0 # Use the ref you want to point at - hooks: - # Enforce that `# type: ignore` annotations always occur with specific codes. - # Sample annotations: # type: ignore[attr-defined] # type: ignore[attr-defined,name-defined] - - id: python-check-blanket-type-ignore + additional_dependencies: [tox] + + - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v6.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f4092780..52f62e91 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,78 +1,153 @@ # Development setup +To run all tests and all supported static analysis tools, Python 3.11 is needed, +which matches the current Python version of XenServer 9. + +Python 3.10 might work as well (when replacing the references in the config files with 3.10). +Python 3.12 and 3.13 can be used too, but not for running [pytype](https://github.com/google/pytype) +([it does not support 3.12 yet](https://google.github.io/pytype/support.html)). + +On Ubuntu, you can install 3.11 (and also 3.12 and 3.13) from the widely-used Python support PPA: + +```sh +sudo add-apt-repository ppa:deadsnakes/ppa && sudo apt update +sudo apt install -y python3.11 python3.12 python3.13 +``` + +If 3.12 or 3.13 are found by [tox](https://tox.wiki), it will run the unit tests with them as well. + +You can also use [uv to install Python versions](https://docs.astral.sh/uv/concepts/python-versions), +see below for a link and an example of how to install uv. + +## Do not use distro-provided Python CI tools + +Python tools (other than the Python interpreters themselves) provided by Linux distributions +are "always" out of date and do not work as required. If possible, uninstall/remove those, +even if your environment is based on Ubuntu 24.04.
In addition, most problematically, the +distribution-provided Python tools are running using the default Python version of the +host system, which may be Python 3.12 or newer and cause `pytype` runs to fail with errors that look +like e.g. not finding a python import like `pytest_httpserver` or `pyfakefs`. + ## Create a virtual environment with the test dependencies +[Install `uv`](https://docs.astral.sh/uv/), either using `pip`/`pipx` or the +[installer](https://docs.astral.sh/uv/getting-started/installation/) +and install the extras groups that you need. Example: + +```sh +pip install pipx +pipx install uv +uv venv --python 3.11 .uv-venv +. .uv-venv/bin/activate +uv pip install -r pyproject.toml --extra test mypy pytype pyright tox pre-commit +``` + +The older, slower way is to use pip-compile to install the deps from `pyproject.toml`: + ```bash python -m venv .venv . .venv/bin/activate -pip install pip-tools==7.3.0 +pip install pre-commit pip-tools==7.3.0 pip-compile --extra=test,mypy,pyright,pytype,tox -o - pyproject.toml | pip install -r /dev/stdin ``` -## Development setup on Fedora 37 +## Running CI -On Fedora 37, the `tox` rpm installs all Python versions. -But this `tox` is older, so install `tox==4.5.1` using `pip` (see below) +These commands assume you installed the tools using the commands above in a Python 3.11 environment. -```bash -sudo dnf install tox;sudo rpm -e tox +### Run pyright, watching for changes and automatically checking the change + +```sh +pyright -w ``` -But preferably use `tox` from the virtual environment instead. +### Run pytest with coverage (fine-grained, e.g. during test development) + +```sh +pytest --cov -v --new-first -x --show-capture=all -rA [optional: files / select which tests to run] +``` -## Development setup on Ubuntu 24.04 +### Watching and running tests on changes automatically using `pytest-watch` (`ptw`) -Prefer the virtual environment.
Alternatively, an option is to use `pipx`: +Install ptw in the Python environment using: -```bash -sudo apt install pipx -pipx install tox; pipx install 'pytest<7';pipx install pylint -pipx inject pytest pytest-{forked,localftpserver,pythonpath,subprocess,timeout} pyfakefs pytest_httpserver six mock -pipx inject pylint pyfakefs six mock pytest{,_forked,-localftpserver} +```sh +pip install pytest-watch ``` -Use the `deadsnakes` ppa to install Python versions like 3.8 and 3.11 (see below) +`ptw` watches changed files and runs `pytest` after changes are saved. +Run `ptw`, and pass the files to watch, e.g.: -## Development setup on Ubuntu 22.04 +```sh +ptw tests/test_* +``` -Usage of to install -other python versions. +### Run mypy (fine-grained, e.g. during development) -```bash -sudo apt update -sudo apt install software-properties-common python{2,3}-dev -sudo add-apt-repository -y ppa:deadsnakes/ppa -sudo apt-get install -y python3.{8,11}{,-distutils} +```sh +mypy [optionally pass the flags or files to select which tests to run] +``` + +### Run pylint (fine-grained, e.g. during development) + +```sh +pylint xcp tests [optionally pass the flags or files to select which tests to run] +``` + +### Run all of the above on one go in defined virtual environments + +```sh +tox -e py311-cov-check-lint-mdreport ``` -Installation of additional python versions for testing different versions: +This also checks code coverage and ends with a test report from the pytest run. +If you just run `tox` without arguments, in addition, the unit tests are run with +all installed Python versions (out of the list of 3.11, 3.12 and 3.13) -- If `deadsnakes/ppa` does not work, e.g. for Python 3.6, `conda` or `pyenv` can be used. 
- For instructions, see : +### Run pre-commit for all checks - ```bash - sudo apt install -y build-essential xz-utils zlib1g-dev \ - lib{bz2,ffi,lzma,readline,ssl,sqlite3}-dev - curl https://pyenv.run | bash # add displayed commands to .bashrc - ~/.pyenv/bin/pyenv install 3.{6,8,11} && ~/.pyenv/bin/pyenv local 3.{6,8,11} # builds them - ``` +To run all tests, including trailing whitespace checks, run -- For testing on newer Ubuntu which has `python2-dev`, but not `pip2`, install `pip2` this way: +```sh +pre-commit run -av +``` - ```bash - curl https://bootstrap.pypa.io/pip/2.7/get-pip.py --output get-pip.py;sudo python2 get-pip.py - ``` +## Alternative: installing pytest packages using `pipx` -You may want to install `pytype` in your user environment to run it directly without `tox`: +`pipx` installs tools in `~/.local/share/pipx/venvs` which can be an alternate +way to install up-to-date python tools ```bash -# On Python != 3.8, pytype can't import xml.dom.minidom, use 3.8: -python3.8 -m pip install pytype -python -m pip install tabulate -./pytype_runner.py +python3.11 -m pip install pipx +pipx install tox; pipx install 'pytest<7';pipx install pylint pyright +pipx inject pytest pytest-{forked,localftpserver,pythonpath,subprocess,timeout} pyfakefs pytest_httpserver six mock +pipx inject pylint pyfakefs six mock pytest{,_forked,-localftpserver} ``` -## Installation of dependencies using `pip` +### Updating the documentation + +For consistently well-spaced documentation, all Markdown files are checked +in CI using Markdownlint, which ensures that e.g. code blocks are separated +by space from the preceeding and following paragraphs and so on. This helps +to keep the Markdown source as well-readable as the rendered Markdown. 
+ +To check and fix Markdown files quickly, use: + +```sh +pre-commit run -av markdownlint +``` + +### Removing trailing whitespace and fixing files to have only one trailing newline + +These fixers detect and fix trailing whitespace and trailing newlines in files +to keep commits clean of adding trailing whitespace and are used in GitHub CI: + +```sh +pre-commit run -av trailing-whitespace +pre-commit run -av end-of-file-fixer +``` + +## Background information on the provided tools ### Testing locally and in GitHub CI using `tox` @@ -156,6 +231,9 @@ For more information to debug `pytest` test suites see ## Running GitHub actions locally using `act` +With `docker` (or `podman`) installed, [act](https://github.com/nektos/act) can be used to run +the CI jobs configured in [.actrc](.actrc): + - `act` uses `docker` (also mimicked by `podman-docker`) to run GitHub actions locally - While `act` does not use the same GitHub runner images, they are similar. diff --git a/README.md b/README.md index b7e5c133..ee12582e 100644 --- a/README.md +++ b/README.md @@ -58,48 +58,9 @@ open a recent workflow run the latest and scroll down until you see the tables! - `.github/act-serial.yaml`: Configuration for the jobs run by the local GitHub actions runner `act` - `.pylintrc`: Configuration file of `Pylint` -## Installation and Setup of a development environment - -For the installation of the general development dependencies, visit [INSTALL.md](INSTALL.md) - -### Updating tests using `pytest-watch` (`ptw`) - -- `pip install pytest-watch` - `ptw` watches changed files and runs `pytest` after changes are saved. - - Then run `ptw` on the code/tests you work on, e.g.: `ptw tests/test_pci_*` and edit the files.
- -## Example development workflow - -- Run the tests for at also with `LC_ALL=C python3.6 -m pytest` to check for any `ascii` codec - issues by Python3.6 -- Test with `python2.7 -m pytest` -- Run `mypy` (without any arguments - The configuration is in `pyproject.toml`) -- Run `./pytype_runner.py` -- Run `tox -e py36-lint` and fix any `Pylint` warnings -- Run `tox -e py310-covcombine-check` and fix any missing diff-coverage. -- Run `tox` for the full CI test suite -- Run `act` for the full CI test suite in local containers (similar to GitHub action containers) -- Commit with `--signoff` on a new branch and push it and check the triggered GitHub Action run succeeds. -- Open a new PR - -The list of `virtualenvs` configured in tox can be shown using this command: `tox -av` - -```ml -$ tox -av -default environments: -py36-lint -> Run in a py36 virtualenv: Run pylint and fail on warnings remaining on lines in the diff to master -py311-pyright -> Run in a py311 virtualenv: Run pyright for static analyis -py38-pytype -> Run in a py38 virtualenv: Run pytype for static analyis, intro: https://youtu.be/abvW0mOrDiY -py310-covcombine-check -> Run in a py310 virtualenv: Generate combined coverage reports with py27-test coverage merged Run mypy for static analyis - -additional environments: -cov -> Run in a python virtualenv: Generate coverage html reports (incl. diff-cover) for this environment -covcp -> Run in a python virtualenv: Copy the generated .converage and coverage.xml to the UPLOAD_DIR dir -fox -> Run in a python virtualenv: Generate combined coverage html reports and open them in firefox -mdreport -> Run in a python virtualenv: Make a test report (which is shown in the GitHub Actions Summary Page) -test -> Run in a python virtualenv: Run pytest in this environment with --cov for use in other stages -``` +## Installation and setup of the development environment -If you have only one version of Python3, that works too. 
Use: `tox -e py-test` +For the installation of the general development dependencies, visit [CONTRIBUTING.md](CONTRIBUTING.md) ## Static analysis using mypy, pylint, pyright and pytype diff --git a/pylint_runner.py b/pylint_runner.py index b013f66f..5edb7cfd 100755 --- a/pylint_runner.py +++ b/pylint_runner.py @@ -227,10 +227,12 @@ def write_results_as_markdown_tables(branch_url, fp, panda_overview, panda_resul me = os.path.basename(__file__) mylink = f"[{me}]({branch_url}/{me})" # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#multiline-markdown-content - fp.write(f"### PyLint breakdown from {mylink} on **xcp/\\*\\*/*.py**\n") + fp.write(f"### PyLint summary (by {mylink})\n") fp.write(panda_overview.to_markdown()) - fp.write(f"\n### PyLint results from {mylink} on **xcp/\\*\\*/*.py**\n") - fp.write(panda_results.to_markdown()) + fp.write("\n") + if not panda_results.empty: + fp.write("### PyLint results\n") + fp.write(panda_results.to_markdown()) if __name__ == "__main__": @@ -242,7 +244,7 @@ def write_results_as_markdown_tables(branch_url, fp, panda_overview, panda_resul branch = os.environ.get("GITHUB_HEAD_REF", None) or os.environ.get("GITHUB_REF_NAME", None) ghblob_url = f"{server_url}/{repository}/blob/{branch}" - # Like the previous run-pylint.sh, check the xcp module by default: + # By default, run pylint on xcp/ and tests/ dirs_to_check = sys.argv[1:] if len(sys.argv) > 1 else ["xcp", "tests"] # https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-job-summary @@ -255,5 +257,8 @@ def write_results_as_markdown_tables(branch_url, fp, panda_overview, panda_resul # pylint_txt = os.environ.get("ENVLOGDIR", ".tox") + "/pylint.txt" - print("Checking:", str(dirs_to_check) + "; Writing report to:", step_summary) + print("Checking:", " ".join(dirs_to_check) + ". 
Writing report to:", step_summary) main(dirs_to_check, step_summary, pylint_txt, ghblob_url) + # Show the report + with open(step_summary, "r", encoding="utf-8") as fp: + print(fp.read()) diff --git a/pyproject.toml b/pyproject.toml index 9b25f035..10da84b7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,8 +39,6 @@ dependencies = [ # Info: xcp imports branding, but branding has no distribution, so we can't list it. # A stub for testing is in ./stubs, which we configure to be in PYTHONPATH by pytest. "six", - "configparser; python_version < '3.0'", - "pyliblzma; python_version < '3.0'" ] [project.optional-dependencies] @@ -50,18 +48,19 @@ test = [ "pytest>=7", "pytest-cov", "pytest-forked", - "pytest_httpserver; python_version >= '3.7'", - "pytest-localftpserver; python_version >= '3.7'", - "pytest-localftpserver==0.5.1; python_version <= '3.6'", - "pytest-subprocess; python_version >= '3.6'", + "pytest_httpserver", + "pytest-localftpserver", + "pytest-subprocess", "pytest-timeout", "typing_extensions" ] coverage = [ + "python-libs[test]", "coverage[toml]", "diff_cover" ] mypy = [ + "python-libs[test]", "lxml", "mypy", "mypy-extensions", @@ -71,20 +70,13 @@ mypy = [ "types-toml", ] pytype = [ + "python-libs[test]", "pandas", "pytype", ] tox = [ - # The latest versions of tox need 'py>=1.11.0' and this is not stated in the deps of tox-4.5.1. 
- "py>=1.11.0", - # Use tox==4.5.1: tox>=4 is needed for reading the extras from pyproject.toml - # while tox>=4.5.2 depends on virutalenv>=20.23, which breaks Python 2.7: - "tox==4.5.1; python_version >= '3.7'", - "tox-gh-actions; python_version >= '3.7'", - # virtualenv-20.22 breaks using python2.7 for the `py27` virtualenv with tox and newer - # versions even break py36(which is also EOL) because py36 does not support - # from __future__ import annotations - "virtualenv<20.22", + "tox>=4.6", + "tox-gh-actions", ] [project.urls] diff --git a/pytest.ini b/pytest.ini index 6c737ec0..32022ae5 100644 --- a/pytest.ini +++ b/pytest.ini @@ -27,4 +27,4 @@ log_cli_level=INFO # # Add directory to find the example branding.py (needed by bootloader.py) in ./stubs: # (Requires pytest >= 7.0.0): -pythonpath=stubs \ No newline at end of file +pythonpath=stubs diff --git a/pytype_runner.py b/pytype_runner.py index 3ee62e98..12b804f5 100755 --- a/pytype_runner.py +++ b/pytype_runner.py @@ -77,7 +77,7 @@ def run_pytype(command: List[str], branch_url: str, errorlog: TextIO, results): ok = True while ok: for key, _ in sel.select(): - line = key.fileobj.readline() # type: ignore + line = key.fileobj.readline() # type: ignore[union-attr] if not line: ok = False break @@ -152,6 +152,13 @@ def to_markdown(me, fp, returncode, results, branch_url): def setup_and_run_pytype_action(scriptname: str): config = load("pyproject.toml") pytype = config["tool"].get("pytype") + required_version = pytype.get("python_version") + if required_version: + if f"{sys.version_info[0]}.{sys.version_info[1]}" != required_version: + print(f"pytype requires to run in exactly Python {required_version}!") + print("It does not support 3.12: Fails to find pytest_httpserver with it") + sys.exit(1) + xfail_files = pytype.get("xfail", []) if pytype else [] repository_url = config["project"]["urls"]["repository"].strip(" /") filelink_baseurl = repository_url + "/blob/master" @@ -166,6 +173,7 @@ def 
setup_and_run_pytype_action(scriptname: str): # Write the panda dable to a markdown output file: summary_file = os.environ.get("GITHUB_STEP_SUMMARY", None) if summary_file: + os.makedirs(os.path.dirname(summary_file), exist_ok=True) with open(summary_file, "w", encoding="utf-8") as fp: to_markdown(scriptname, fp, retcode, results, filelink_baseurl) else: diff --git a/stubs/parameterized/__init__.pyi b/stubs/parameterized/__init__.pyi deleted file mode 100644 index aed931d0..00000000 --- a/stubs/parameterized/__init__.pyi +++ /dev/null @@ -1 +0,0 @@ -from .parameterized import param as param, parameterized as parameterized, parameterized_class as parameterized_class diff --git a/stubs/parameterized/parameterized.pyi b/stubs/parameterized/parameterized.pyi deleted file mode 100644 index 23dba7d7..00000000 --- a/stubs/parameterized/parameterized.pyi +++ /dev/null @@ -1,76 +0,0 @@ -from _typeshed import Incomplete -from collections import OrderedDict as MaybeOrderedDict -from typing import NamedTuple - -# MaybeOrderedDict = dict - -class SkipTest(Exception): ... - -PY3: Incomplete -PY2: Incomplete -PYTEST4: Incomplete - -class InstanceType: ... - -lzip: Incomplete -text_type = str -string_types: Incomplete -bytes_type = bytes - -def make_method(func, instance, type): ... -def to_text(x): ... - -class CompatArgSpec(NamedTuple): - args: Incomplete - varargs: Incomplete - keywords: Incomplete - defaults: Incomplete - -def getargspec(func): ... -def skip_on_empty_helper(*a, **kw) -> None: ... -def reapply_patches_if_need(func): ... -def delete_patches_if_need(func) -> None: ... - -class _param(NamedTuple): - args: Incomplete - kwargs: Incomplete - -class param(_param): - def __new__(cls, *args, **kwargs): ... - @classmethod - def explicit(cls, args: Incomplete | None = ..., kwargs: Incomplete | None = ...): ... - @classmethod - def from_decorator(cls, args): ... - -class QuietOrderedDict(MaybeOrderedDict): ... 
# type: ignore - -def parameterized_argument_value_pairs(func, p): ... -def short_repr(x, n: int = ...): ... -def default_doc_func(func, num, p): ... -def default_name_func(func, num, p): ... -def set_test_runner(name) -> None: ... -def detect_runner(): ... - -class parameterized: - get_input: Incomplete - doc_func: Incomplete - skip_on_empty: Incomplete - def __init__(self, input, doc_func: Incomplete | None = ..., skip_on_empty: bool = ...) -> None: ... - def __call__(self, test_func): ... - def param_as_nose_tuple(self, test_self, func, num, p): ... - def assert_not_in_testcase_subclass(self) -> None: ... - @classmethod - def input_as_callable(cls, input): ... - @classmethod - def check_input_values(cls, input_values): ... - @classmethod - def expand(cls, input, name_func: Incomplete | None = ..., doc_func: Incomplete | None = ..., skip_on_empty: bool = ..., **legacy): ... - @classmethod - def param_as_standalone_func(cls, p, func, name): ... - @classmethod - def to_safe_name(cls, s): ... - -def parameterized_class(attrs, input_values: Incomplete | None = ..., class_name_func: Incomplete | None = ..., classname_func: Incomplete | None = ...): ... -def unwrap_mock_patch_func(f): ... -def get_class_name_suffix(params_dict): ... -def default_class_name_func(cls, num, params_dict): ... diff --git a/stubs/parameterized/test.pyi b/stubs/parameterized/test.pyi deleted file mode 100644 index aab4e91f..00000000 --- a/stubs/parameterized/test.pyi +++ /dev/null @@ -1,100 +0,0 @@ -from .parameterized import PY2 as PY2, PY3 as PY3, PYTEST4 as PYTEST4, SkipTest as SkipTest, detect_runner as detect_runner, param as param, parameterized as parameterized, parameterized_argument_value_pairs as parameterized_argument_value_pairs, parameterized_class as parameterized_class, short_repr as short_repr -from _typeshed import Incomplete -from unittest import TestCase - -def assert_contains(haystack, needle) -> None: ... 
- -runner: Incomplete -UNITTEST: Incomplete -NOSE2: Incomplete -PYTEST: Incomplete -SKIP_FLAGS: Incomplete -missing_tests: Incomplete - -def expect(skip, tests: Incomplete | None = ...) -> None: ... - -test_params: Incomplete - -def test_naked_function(foo, bar: Incomplete | None = ...) -> None: ... - -class TestParameterized: - def test_instance_method(self, foo, bar: Incomplete | None = ...) -> None: ... - -class TestSetupTeardown: - stack: Incomplete - actual_order: str - def setUp(self) -> None: ... - def tearDown(self) -> None: ... - def test_setup(self, count, *a) -> None: ... - -def custom_naming_func(custom_tag, kw_name): ... - -class TestParameterizedExpandWithMockPatchForClass(TestCase): - def test_one_function_patch_decorator(self, foo, mock_umask, mock_getpid) -> None: ... - def test_multiple_function_patch_decorator(self, foo, bar, mock_umask, mock_fdopen, mock_getpid) -> None: ... - -class TestParameterizedExpandWithNoExpand: - def test_patch_class_no_expand(self, foo, bar, mock_umask, mock_getpid) -> None: ... - -class TestParameterizedExpandWithNoMockPatchForClass(TestCase): - def test_one_function_patch_decorator(self, foo, mock_umask) -> None: ... - def test_multiple_function_patch_decorator(self, foo, bar, mock_umask, mock_fdopen) -> None: ... - -class TestParameterizedExpandWithNoMockPatchForClassNoExpand: - def test_patch_no_expand(self, foo, bar, mock_umask) -> None: ... - -def test_mock_patch_standalone_function(foo, mock_umask) -> None: ... - -class TestParamerizedOnTestCase(TestCase): - def test_on_TestCase(self, foo, bar: Incomplete | None = ...) -> None: ... - def test_on_TestCase2(self, foo, bar: Incomplete | None = ...) -> None: ... - -class TestParameterizedExpandDocstring(TestCase): - def test_custom_doc_func(self, foo, bar: Incomplete | None = ...) -> None: ... - def test_single_line_docstring(self, foo) -> None: ... - def test_empty_docstring(self, foo) -> None: ... - def test_multiline_documentation(self, foo) -> None: ... 
- def test_unicode_docstring(self, foo) -> None: ... - def test_default_values_get_correct_value(self, foo, bar: int = ...) -> None: ... - def test_with_leading_newline(self, foo, bar: int = ...) -> None: ... - -def test_warns_when_using_parameterized_with_TestCase() -> None: ... -def test_helpful_error_on_invalid_parameters() -> None: ... -def test_helpful_error_on_empty_iterable_input() -> None: ... -def test_skip_test_on_empty_iterable() -> None: ... -def test_helpful_error_on_empty_iterable_input_expand() -> None: ... -def test_wrapped_iterable_input(foo) -> None: ... -def test_helpful_error_on_non_iterable_input(): ... -def tearDownModule() -> None: ... -def test_old_style_classes() -> None: ... - -class TestOldStyleClass: - def test_old_style_classes(self, param) -> None: ... - -def test_parameterized_argument_value_pairs(func_params, p, expected) -> None: ... -def test_short_repr(input, expected, n: int = ...) -> None: ... -def test_with_docstring(input) -> None: ... - -cases_over_10: Incomplete - -def test_cases_over_10(input, expected) -> None: ... - -class TestParameterizedClass(TestCase): - def test_method_a(self) -> None: ... - def test_method_b(self) -> None: ... - def testCamelCaseMethodC(self) -> None: ... - -class TestNamedParameterizedClass(TestCase): - def test_method(self) -> None: ... - -class TestParameterizedClassDict(TestCase): - foo: int - bar: str - def setUp(self) -> None: ... - def tearDown(self) -> None: ... - def test_method(self) -> None: ... - -class TestUnicodeDocstring: - def test_with_docstring(self, param) -> None: ... - -def test_missing_argument_error() -> None: ... 
diff --git a/stubs/pytest_httpserver.pyi b/stubs/pytest_httpserver.pyi deleted file mode 100644 index 0ee27850..00000000 --- a/stubs/pytest_httpserver.pyi +++ /dev/null @@ -1,134 +0,0 @@ -import abc -from enum import Enum -from ssl import SSLContext -from typing import Any, Callable, Iterable, Mapping, MutableMapping, Optional, Pattern, Tuple, Union - -# pylint: disable=import-error, no-name-in-module, super-init-not-called, multiple-statements, too-few-public-methods, invalid-name, line-too-long -from _typeshed import Incomplete - -from werkzeug.wrappers import Request, Response - -URI_DEFAULT: str -METHOD_ALL: str -HEADERS_T = Union[Mapping[str, Union[str, Iterable[str]]], Iterable[Tuple[str, str]]] -HVMATCHER_T = Callable[[str, Optional[str], str], bool] - -class Error(Exception): ... -class NoHandlerError(Error): ... -class HTTPServerError(Error): ... -class NoMethodFoundForMatchingHeaderValueError(Error): ... - -class WaitingSettings: - raise_assertions: Incomplete - stop_on_nohandler: Incomplete - timeout: Incomplete - def __init__(self, raise_assertions: bool = ..., stop_on_nohandler: bool = ..., timeout: float = ...) -> None: ... - -class Waiting: - def __init__(self) -> None: ... - def complete(self, result: bool): ... - @property - def result(self) -> bool: ... - @property - def elapsed_time(self) -> float: ... - -class HeaderValueMatcher: - DEFAULT_MATCHERS: MutableMapping[str, Callable[[Optional[str], str], bool]] - matchers: Incomplete - def __init__(self, matchers: Optional[Mapping[str, Callable[[Optional[str], str], bool]]] = ...) -> None: ... - @staticmethod - def authorization_header_value_matcher(actual: Optional[str], expected: str) -> bool: ... - @staticmethod - def default_header_value_matcher(actual: Optional[str], expected: str) -> bool: ... - def __call__(self, header_name: str, actual: Optional[str], expected: str) -> bool: ... 
- -class URIPattern(abc.ABC, metaclass=abc.ABCMeta): - @abc.abstractmethod - def match(self, uri: str) -> bool: ... - -class RequestMatcher: - uri: Incomplete - method: Incomplete - query_string: Incomplete - query_matcher: Incomplete - json: Incomplete - headers: Incomplete - data: Incomplete - data_encoding: Incomplete - header_value_matcher: Incomplete - # def __init__(self) - def match_data(self, request: Request) -> bool: ... - def match_uri(self, request: Request) -> bool: ... - def match_json(self, request: Request) -> bool: ... - def match(self, request: Request) -> bool: ... - -class RequestHandlerBase(abc.ABC, metaclass=abc.ABCMeta): - def respond_with_json(self, response_json, status: int = ..., headers: Optional[Mapping[str, str]] = ..., content_type: str = ...): ... - def respond_with_data(self, response_data: Union[str, bytes] = ..., status: int = ..., headers: Optional[HEADERS_T] = ..., mimetype: Optional[str] = ..., content_type: Optional[str] = ...): ... - @abc.abstractmethod - def respond_with_response(self, response: Response): ... - -class RequestHandler(RequestHandlerBase): - matcher: Incomplete - request_handler: Incomplete - def __init__(self, matcher: RequestMatcher) -> None: ... - def respond(self, request: Request) -> Response: ... - def respond_with_handler(self, func: Callable[[Request], Response]): ... - def respond_with_response(self, response: Response): ... - -class HandlerType(Enum): - PERMANENT: str - ONESHOT: str - ORDERED: str - -class HTTPServerBase(abc.ABC, metaclass=abc.ABCMeta): - host: Incomplete - port: Incomplete - server: Incomplete - server_thread: Incomplete - assertions: Incomplete - handler_errors: Incomplete - log: Incomplete - ssl_context: Incomplete - no_handler_status_code: int - def __init__(self, host: str, port: int, ssl_context: Optional[SSLContext] = ...) -> None: ... - def clear(self) -> None: ... - def clear_assertions(self) -> None: ... - def clear_handler_errors(self) -> None: ... 
- def clear_log(self) -> None: ... - def url_for(self, suffix: str): ... - def create_matcher(self, *args, **kwargs) -> RequestMatcher: ... - def thread_target(self) -> None: ... - def is_running(self) -> bool: ... - def start(self) -> None: ... - def stop(self) -> None: ... - def add_assertion(self, obj) -> None: ... - def check(self) -> None: ... - def check_assertions(self) -> None: ... - def check_handler_errors(self) -> None: ... - def respond_nohandler(self, request: Request, extra_message: str = ...): ... - @abc.abstractmethod - def dispatch(self, request: Request) -> Response: ... - def application(self, request: Request): ... - def __enter__(self): ... - def __exit__(self, *args, **kwargs) -> None: ... - @staticmethod - def format_host(host): ... - -class HTTPServer(HTTPServerBase): - DEFAULT_LISTEN_HOST: str - DEFAULT_LISTEN_PORT: int - ordered_handlers: Incomplete - oneshot_handlers: Incomplete - handlers: Incomplete - permanently_failed: bool - default_waiting_settings: Incomplete - def __init__(self, host=..., port=..., ssl_context: Optional[SSLContext] = ..., default_waiting_settings: Optional[WaitingSettings] = ...) -> None: ... - def clear(self) -> None: ... - def clear_all_handlers(self) -> None: ... - def expect_request(self, uri: Union[str, URIPattern, Pattern[str]], method: str = ..., data: Union[str, bytes, None] = ..., data_encoding: str = ..., header_value_matcher: Optional[HVMATCHER_T] = ..., handler_type: HandlerType = ..., json: Any = ...) -> RequestHandler: ... - def format_matchers(self) -> str: ... - def respond_nohandler(self, request: Request, extra_message: str = ...): ... - def respond_permanent_failure(self): ... - def dispatch(self, request: Request) -> Response: ... - def wait(self, raise_assertions: Optional[bool] = ..., stop_on_nohandler: Optional[bool] = ..., timeout: Optional[float] = ...): ... 
diff --git a/tests/test_bootloader.py b/tests/test_bootloader.py index 4a34cec6..c67dbfa2 100644 --- a/tests/test_bootloader.py +++ b/tests/test_bootloader.py @@ -19,15 +19,16 @@ def _test_cfg(self, cfg): universal_newlines=True) assert proc.stdout is not None # for pyright, to ensure it is valid + # check the diff output, working around trailing whitespace issues self.assertEqual(proc.stdout.read(), '''5a6,13 > if [ -s $prefix/grubenv ]; then > load_env > fi -> +> ''' + ''' > if [ -n "$override_entry" ]; then > set default=$override_entry > fi -> +> ''' + ''' ''') proc.stdout.close() proc.wait() diff --git a/tests/test_ifrename_dynamic.py b/tests/test_ifrename_dynamic.py index e3b45a64..d4430281 100644 --- a/tests/test_ifrename_dynamic.py +++ b/tests/test_ifrename_dynamic.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json import logging +import time import unittest from io import StringIO @@ -20,8 +21,11 @@ def setUp(self): openLog(self.logbuf, logging.NOTSET) def tearDown(self): + # Workaround flaky unclosed file warnings on GitHub runners closeLogs() + time.sleep(0.1) self.logbuf.close() + time.sleep(0.2) def test_null(self): self.assertLoadDynamicRules("") diff --git a/tox.ini b/tox.ini index 535d80ec..d9be36ec 100644 --- a/tox.ini +++ b/tox.ini @@ -1,4 +1,5 @@ [tox] +min_version = 4.6 # Set the envlist: # Defines the environments that tox runs by default (when -e is not used) # @@ -11,20 +12,11 @@ # .github/workflows/main.yml is set up to test with 3.11, 3.12 and 3.13 in parallel. # Therefore, use three environments: One with 3.11, one with 3.12 and one with 3.13: # -envlist = py311-covcp-check-mdreport, py312-cov-pytype, py313-cov-lint-pyright +envlist = py311-covcp-check-pytype-mdreport, py312-cov, py313-cov-lint-pyright isolated_build = true skip_missing_interpreters = true requires = - # The latest versions of tox need 'py>=1.11.0' and this is not stated in the deps of tox-4.5.1. 
- py>=1.11.0 - # Use tox==4.5.1: tox>=4 is needed for reading the extras from pyproject.toml - # while tox>=4.5.2 depends on virutalenv>=20.23, which breaks Python 2.7: - tox==4.5.1; python_version >= '3.7' - tox-gh-actions; python_version >= '3.7' - # virtualenv-20.22 breaks using python2.7 for the `py27` virtualenv with tox and newer - # versions even break py36(which is also EOL) because py36 does not support - # from __future__ import annotations - virtualenv<20.22 + tox-gh-actions [test] description = Run pytest in this environment with --cov for use in other stages @@ -42,7 +34,6 @@ commands = [testenv] description = Run in a {basepython} virtualenv: cov: {[cov]description} - covcombine: {[covcombine]description} covcp: Copy the generated .converage and coverage.xml to the UPLOAD_DIR dir fox: {[fox]description} lint: {[lint]description} @@ -54,27 +45,24 @@ description = Run in a {basepython} virtualenv: pytype: Run pytype for static analyis, intro: https://youtu.be/abvW0mOrDiY # checkers(mypy) need the pytest dependices as well: extras = - {check,pytype}: {[check]extras} - {cov,covcp,covcombine,fox,check,lint,test,pytype,pyright,mdreport}: {[test]extras} - {cov,covcp,covcombine,fox}: {[cov]extras} + check: {[check]extras} + pytype: {[pytype]extras} + {lint,test,pyright,mdreport}: {[test]extras} + {cov,covcp,fox}: {[cov]extras} deps = mdreport: pytest-md-report - {py27-test,py27-cov}: pyftpdlib - {py27-test,py27-cov}: pylint - {cov,covcp,covcombine,fox}: coverage[toml] - {cov,covcp,covcombine,fox}: diff-cover + {cov,covcp,fox}: coverage[toml] + {cov,covcp,fox}: diff-cover {lint,fox}: {[lint]deps} pyright: pyright - pytype: {[pytype]deps} allowlist_externals = - {cov,covcp,covcombine,fox,check,lint,test,mdreport}: echo - {cov,covcp,covcombine,fox,check,lint,test,mdreport}: sh - {cov,covcp,covcombine,fox}: cp - {covcombine,fox}: tox + {cov,covcp,fox,check,lint,test,mdreport}: echo + {cov,covcp,fox,check,lint,test,mdreport}: sh + {cov,covcp,fox}: cp check: cat 
fox: firefox passenv = - {pytype,covcombine,lint,test}: GITHUB_STEP_SUMMARY + {pytype,lint,test}: GITHUB_STEP_SUMMARY pytype: GITHUB_SERVER_URL pytype: GITHUB_REPOSITORY pytype: GITHUB_HEAD_REF @@ -110,20 +98,16 @@ commands = pyright: {[pyright]commands} check: {[check]commands} pytype: {[pytype]commands} - {cov,covcp,covcombine,check,fox,test,mdreport}: {[test]commands} - # covcombine shall not call [cov]commands: diff-cover shall check the combined cov: + {cov,covcp,check,fox,test,mdreport}: {[test]commands} {cov,covcp}: {[cov]commands} - {py27-test}: pylint --py3k --disable=no-absolute-import xcp/ covcp: cp -av {envlogdir}/coverage.xml {env:UPLOAD_DIR:.} - covcombine: {[covcombine]commands} - fox: {[covcombine]commands} + fox: {[cov]commands} fox: {[lint]commands} fox: {[fox]commands} [cov] description = Generate coverage html reports (incl. diff-cover) for this environment setenv = PY3_DIFFCOVER_OPTIONS=--ignore-whitespace --show-uncovered - py27: PY3_DIFFCOVER_OPTIONS= extras = coverage test commands = @@ -136,27 +120,6 @@ commands = --html-report {envlogdir}/coverage-diff.html \ {envlogdir}/coverage.xml -[covcombine] -description = Generate combined coverage reports with py27-test coverage merged -commands = - tox -e py27-test - sh -c 'export COVERAGE_FILE=$COVERAGE_FILE-combined; \ - coverage combine --keep {envlogdir}/../../py27-test/log/.coverage {envlogdir}/.coverage;\ - coverage xml -o {envlogdir}/coverage.xml;\ - coverage html -d {envlogdir}/htmlcov;\ - coverage html -d {envlogdir}/htmlcov-tests --include="tests/*"' - sh -c '\ - diff-cover --compare-branch=origin/master --ignore-staged --ignore-unstaged \ - --ignore-whitespace --show-uncovered --fail-under {env:DIFF_COVCOMBINE_MIN:100} \ - --html-report {envlogdir}/coverage-diff.html \ - --markdown-report {envlogdir}/coverage-diff.md \ - {envlogdir}/coverage.xml; EXIT_CODE=$?;echo $EXIT_CODE; \ - GITHUB_STEP_SUMMARY={env:GITHUB_STEP_SUMMARY:.git/GITHUB_STEP_SUMMARY.md}; \ - if [ -n 
"$GITHUB_STEP_SUMMARY" ]; then \ - mkdir -p ${GITHUB_STEP_SUMMARY%/*};sed "/title/,/\/style/d" \ - {envlogdir}/coverage-diff.html >>"$GITHUB_STEP_SUMMARY"; fi; \ - exit $EXIT_CODE' - [lint] description = Run pylint and fail on warnings remaining on lines in the diff to master deps = pylint @@ -171,7 +134,7 @@ commands = --html-report {envlogdir}/pylint-diff.html {envlogdir}/pylint.txt [fox] -description = Generate combined coverage html reports and open them in firefox +description = Generate pylint and coverage html reports and open them in firefox commands = firefox {envlogdir}/coverage-diff.html \ {envlogdir}/htmlcov/index.html \ {envlogdir}/htmlcov-tests/index.html \ @@ -186,7 +149,6 @@ commands = firefox {envlogdir}/coverage-diff.html # the local venvs will be the same as the venvs created by tox on the GitHub runners: [gh-actions] python = - 2.7: py27 3.6: py36 3.7: py37 3.8: py38 @@ -213,8 +175,9 @@ commands = pyright [pytype] -deps = pytype - pandas +extras = pytype +description = Run pytype (note: It fails to find pytest_httpserver on 3.12!) +basepython = py311 commands = # Needs python >= 3.10: Needed to parse the newer syntax for "Type2 | Type2" pytype --version diff --git a/xcp/dmv.py b/xcp/dmv.py new file mode 100644 index 00000000..4c80cbda --- /dev/null +++ b/xcp/dmv.py @@ -0,0 +1,364 @@ +# Copyright (c) 2025, Citrix Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import errno +import glob +import json +import os +import re +import struct +import subprocess +from typing import Any, Dict + +from .compat import open_with_codec_handling + +dmv_proto_ver = 0.1 +err_proto_ver = 0.1 + +def get_all_kabi_dirs(): + """Return a list of (kabi_ver, updates_dir, dmv_dir) tuples for all kernel versions.""" + modules_root = "/lib/modules/" + dirs = [] + for kabi_ver in os.listdir(modules_root): + updates_dir = os.path.join(modules_root, kabi_ver, "updates") + dmv_dir = os.path.join(modules_root, kabi_ver, "dmv") + # not checking if updates_dir and dmv_dir exist here, will check later when use them + dirs.append((kabi_ver, updates_dir, dmv_dir)) + return dirs + +def note_offset(var_len): + """Note section has 4 bytes padding""" + ret = (((var_len - 1) & ~3) + 4) - var_len + return ret + +def get_active_variant(modules): + """Check and report active driver""" + # Check if any module in the modules is loaded + for module in modules: + # get 'module' from 'module.ko' + module_name = os.path.splitext(module)[0] + note_file = os.path.join("/sys/module", module_name, "notes/.note.XenServer") + if not os.path.isfile(note_file): + continue + + note_struct_size = 
struct.calcsize('III') + with open(note_file, "rb") as n_file: + for _ in range(3): + note_hdr = struct.unpack('III', n_file.read(note_struct_size)) + n_file.read(note_offset(note_struct_size)) + vendor = n_file.read(note_hdr[0]) + n_file.read(note_offset(note_hdr[0])) + content = n_file.read(note_hdr[1])[:-1] + n_file.read(note_offset(note_hdr[1])) + note_type = note_hdr[2] + if vendor == b'XenServer' and note_type == 1: + variant = content.decode("ascii") + return variant + return None + +def get_loaded_modules(modules): + """Return all loaded modules""" + loaded_modules = [] + for module in modules: + # get 'module' from 'module.ko' + module_name = os.path.splitext(module)[0] + note_file = os.path.join("/sys/module", module_name, "notes/.note.XenServer") + if os.path.isfile(note_file): + loaded_modules.append(module) + return loaded_modules + +def id_matches(id1, id2): + if '*' in [id1, id2]: + return True + return id1 == id2 + + +# driver_pci_ids example: +# { +# "abc.ko": [ +# { +# "vendor_id": "14e4", +# "device_id": "163c", +# "subvendor_id": "*", +# "subdevice_id": "*" +# }, +# { +# "vendor_id": "14e4", +# "device_id": "163b", +# "subvendor_id": "*", +# "subdevice_id": "*" +# }], +# "de.ko": [ +# { +# "vendor_id": "eees", +# "device_id": "163c", +# "subvendor_id": "*", +# "subdevice_id": "*" +# }, +# { +# "vendor_id": "14f4", +# "device_id": "16db", +# "subvendor_id": "2123", +# "subdevice_id": "1123" +# }] +# } +def pci_matches(present_pci_id, driver_pci_ids): + """Check if present PCI ID matches any of the driver PCI IDs.""" + merged_driver_pci_id_list = [] + for module_pci_list in driver_pci_ids.values(): + for item in module_pci_list: + merged_driver_pci_id_list.append(item) + + for pci_id in merged_driver_pci_id_list: + if (id_matches(present_pci_id['vendor'], pci_id['vendor_id']) and + id_matches(present_pci_id['device'], pci_id['device_id']) and + id_matches(present_pci_id['subvendor'], pci_id['subvendor_id']) and + 
id_matches(present_pci_id['subdevice'], pci_id['subdevice_id'])): + return True + return False + +def hardware_present(lspci_out, pci_ids): + """Check if supported hardware is fitted""" + if not pci_ids or not lspci_out: + return False + + # 'lspci -nm' output: + # 00:15.3 "0604" "15ad" "07a0" -r01 -p00 "15ad" "07a0" + # 00:01.0 "0604" "8086" "7191" -r01 -p00 "" "" + lspci_expression = r''' + ^ + (?P\S+) # PCI slot (00:15.3) + \s+ + "(?P[^"]*)" # Device class (0604) + \s+ + "(?P[^"]*)" # Vendor (15ad) + \s+ + "(?P[^"]*)" # Device name (07a0) + \s* + (?:-(?P\S+))? # Optional revision (-r01) + \s* + (?:-(?P\S+))? # Optional programming interface (-p00) + \s+ + "(?P[^"]*)" # Subvendor (15ad or empty) + \s+ + "(?P[^"]*)" # Subdevice (07a0 or empty) + $ + ''' + lscpi_pattern = re.compile(lspci_expression, re.VERBOSE | re.MULTILINE) + for match in lscpi_pattern.finditer(lspci_out): + if pci_matches(match.groupdict(), pci_ids): + return True + return False + +def variant_selected(modules, updates_dir): + """Check and return which driver is selected""" + # Check if any module in the modules is selected + for module in modules: + slink_file = os.path.join(updates_dir, module) + if os.path.islink(slink_file): + module_path = os.path.realpath(slink_file) + module_dir = os.path.dirname(module_path) + info_file = os.path.join(module_dir, "info.json") + with open(info_file, "r", encoding="ascii") as json_file: + json_data = json.load(json_file) + variant = json_data["variant"] + + return variant + return None + +class DriverMultiVersion(object): + def __init__(self, updates_dir, lspci_out, runtime=False): + self.updates_dir = updates_dir + self.lspci_out = lspci_out + self.runtime = runtime + + def variant_selected(self, modules): + """Check and return which driver is selected""" + # Check if any module in the modules is selected + for module in modules: + slink_file = os.path.join(self.updates_dir, module) + if os.path.islink(slink_file): + module_path = 
os.path.realpath(slink_file) + module_dir = os.path.dirname(module_path) + info_file = os.path.join(module_dir, "info.json") + with open(info_file, "r", encoding="ascii") as json_file: + json_data = json.load(json_file) + variant = json_data["variant"] + + return variant + return None + + def parse_dmv_info(self, fpath): + """Populate dmv list with information""" + json_data = None + with open_with_codec_handling(fpath, encoding="ascii") as json_file: + json_data = json.load(json_file) + json_formatted = { + "type": json_data["category"], + "friendly_name": json_data["name"], + "description": json_data["description"], + "info": json_data["name"], + "variants": { + json_data["variant"]: { + "version": json_data["version"], + "hardware_present": hardware_present( + self.lspci_out.stdout, + json_data["pci_ids"]), + "priority": json_data["priority"], + "status": json_data["status"]}}} + if self.runtime: + json_formatted["selected"] = self.variant_selected( + json_data["pci_ids"].keys()) + json_formatted["active"] = get_active_variant( + json_data["pci_ids"].keys()) + json_formatted["loaded modules"] = get_loaded_modules( + json_data["pci_ids"].keys()) + return json_data, json_formatted + +class DriverMultiVersionManager(object): + dmv_list = {} # type: Dict[str, Any] + + def __init__(self, runtime=False): + self.runtime = runtime + self.dmv_list = { + "protocol": {"version": dmv_proto_ver}, + "operation": {"reboot": False}, + "drivers": {} + } + self.errors_list = { + "version": err_proto_ver, + "exit_code": 0, + "message": "Success" + } + + def merge_jsondata(self, oldone, newone): + variants = oldone["variants"] + for k, v in newone["variants"].items(): + variants[k] = v + + json_formatted = { + "type": oldone["type"], + "friendly_name": oldone["friendly_name"], + "description": oldone["description"], + "info": oldone["info"], + "variants": variants} + + if self.runtime: + selected = None + if oldone["selected"] is not None: + selected = oldone["selected"] + elif 
newone["selected"] is not None: + selected = newone["selected"] + json_formatted["selected"] = selected + + active = None + if oldone["active"] is not None: + active = oldone["active"] + elif newone["active"] is not None: + active = newone["active"] + json_formatted["active"] = active + + loaded = oldone["loaded modules"] + newone["loaded modules"] + json_formatted["loaded modules"] = loaded + + self.dmv_list["drivers"][oldone["info"]] = json_formatted + + def process_dmv_data(self, json_data, json_formatted): + if not json_data["name"] in self.dmv_list["drivers"]: + self.dmv_list["drivers"][json_data["name"]] = json_formatted + elif self.dmv_list["drivers"][json_data["name"]] is None: + self.dmv_list["drivers"][json_data["name"]] = json_formatted + else: + self.merge_jsondata(self.dmv_list["drivers"][json_data["name"]], json_formatted) + + def parse_dmv_list(self): + lspci_out = subprocess.run(["lspci", '-nm'], stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True, + check=True) + for _, updates_dir, dmv_dir in get_all_kabi_dirs(): + if not os.path.isdir(dmv_dir): + continue + + for path, _, files in os.walk(dmv_dir): + if "info.json" not in files: + continue + + fpath = os.path.join(path, "info.json") + d = DriverMultiVersion(updates_dir, lspci_out, self.runtime) + json_data, json_formatted = d.parse_dmv_info(fpath) + self.process_dmv_data(json_data, json_formatted) + + def parse_dmv_file(self, fpath): + lspci_out = subprocess.run(["lspci", '-nm'], stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True, + check=True) + d = DriverMultiVersion("", lspci_out) + json_data, json_formatted = d.parse_dmv_info(fpath) + self.process_dmv_data(json_data, json_formatted) + + def get_dmv_list(self): + return self.dmv_list + + def create_dmv_symlink(self, name, ver): + created = False + for _, updates_dir, dmv_dir in get_all_kabi_dirs(): + module_dir = os.path.join(dmv_dir, name, ver) + module_files = glob.glob(os.path.join(module_dir, 
"**", "*.ko"), recursive=True) + for module_file in module_files: + # updates_dir may not exist + os.makedirs(updates_dir, exist_ok=True) + module_sym = os.path.join(updates_dir, os.path.basename(module_file)) + tmp_name = module_sym + ".tmp" + try: + os.unlink(tmp_name) + except FileNotFoundError: + pass + os.symlink(module_file, tmp_name) + os.rename(tmp_name, module_sym) + created = True + modules = [module_sym] + input_data = "\n".join(modules) + "\n" + subprocess.run( + ["/usr/sbin/weak-modules", "--no-initramfs", "--add-modules"], + input=input_data, + text=True, + check=True + ) + if created: + subprocess.run(["/usr/sbin/depmod", "-a"], check=True) + uname_r = subprocess.run(["uname", '-r'], stdout=subprocess.PIPE, text=True, + check=True).stdout.strip() + if os.path.exists("/usr/bin/dracut"): + initrd_img = "/boot/initrd-" + uname_r + ".img" + subprocess.run(["/usr/bin/dracut", "-f", initrd_img, uname_r], check=True) + return True + self.errors_list["exit_code"] = errno.ENOENT + self.errors_list["message"] = os.strerror(errno.ENOENT) + return False + + def get_dmv_error(self): + return self.errors_list + + def set_dmv_error(self, errcode): + self.errors_list["exit_code"] = errcode + self.errors_list["message"] = os.strerror(errcode)